#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import json
import logging
import os
import time
import requests
import re
import subprocess
from concurrent.futures import ThreadPoolExecutor
from openstack.exceptions import ResourceNotFound
from requests.auth import HTTPBasicAuth

from opnfv.deployment.factory import Factory as DeploymentFactory

from sdnvpn.lib import config as sdnvpn_config
import sdnvpn.lib.openstack_utils as os_utils

logger = logging.getLogger('sdnvpn_test_utils')

common_config = sdnvpn_config.CommonConfig()

ODL_USER = 'admin'
ODL_PASS = 'admin'

executor = ThreadPoolExecutor(5)


class ExtraRoute(object):
    """
    Class to represent an extra route for a router
    """

    def __init__(self, destination, nexthop):
        self.destination = destination
        self.nexthop = nexthop


class AllowedAddressPair(object):
    """
    Class to represent an allowed address pair for a neutron port
    """

    def __init__(self, ipaddress, macaddress):
        self.ipaddress = ipaddress
        self.macaddress = macaddress


def create_default_flavor():
    return os_utils.get_or_create_flavor(common_config.default_flavor,
                                         common_config.default_flavor_ram,
                                         common_config.default_flavor_disk,
                                         common_config.default_flavor_vcpus)


def create_custom_flavor():
    return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
                                         common_config.custom_flavor_ram,
                                         common_config.custom_flavor_disk,
                                         common_config.custom_flavor_vcpus)


def create_net(conn, name):
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(conn, name)
    if not net_id:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(name))
    return net_id


def create_subnet(conn, name, cidr, net_id):
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(conn,
                                               name,
                                               cidr,
                                               net_id)
    if not subnet_id:
        logger.error(
            "There has been a problem when creating the neutron subnet")
        raise Exception("There has been a problem when creating"
                        " the neutron subnet {}".format(name))
    return subnet_id


def create_network(conn, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
    network_dic = os_utils.create_network_full(conn,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(net))
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            conn, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem when creating the second subnet")
            raise Exception("There has been a problem when creating"
                            " the second subnet {}".format(subnet2))
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id

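# Illustrative usage of create_network() (not executed); the names and
# CIDRs below are hypothetical:
#
#   net_id, subnet_id, router_id = create_network(
#       conn, 'net-1', 'subnet-1', '10.10.10.0/24', 'router-1',
#       subnet2='subnet-2', cidr2='10.10.11.0/24')
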

def get_port(conn, instance_id):
    ports = os_utils.get_port_list(conn)
    for port in ports:
        if port.device_id == instance_id:
            return port
    return None


def update_port_allowed_address_pairs(conn, port_id, address_pairs):
    if len(address_pairs) <= 0:
        return
    allowed_address_pairs = []
    for address_pair in address_pairs:
        address_pair_dict = {'ip_address': address_pair.ipaddress,
                             'mac_address': address_pair.macaddress}
        allowed_address_pairs.append(address_pair_dict)

    try:
        port = conn.network.\
            update_port(port_id, allowed_address_pairs=allowed_address_pairs)
        return port.id
    except Exception as e:
        logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
                     " %s" % (port_id, address_pairs, e))
        return None

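# Illustrative usage (not executed): allow an extra VIP/MAC pair on a
# port by passing AllowedAddressPair objects; the values are hypothetical:
#
#   pairs = [AllowedAddressPair('10.0.0.100', 'fa:16:3e:aa:bb:cc')]
#   update_port_allowed_address_pairs(conn, port_id, pairs)
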

def create_instance(conn,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node=None,
                    userdata=None,
                    files=[],
                    **kwargs
                    ):
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        raise Exception("Error while booting instance {}".format(name))
    else:
        # Retrieve IP of INSTANCE
        network_name = conn.network.get_network(network_id).name
        instance_ip = conn.compute.get_server(instance).\
            addresses.get(network_name)[0]['addr']
        logger.debug("Instance '%s' booted successfully. IP='%s'." %
                     (name, instance_ip))

    if secgroup_name:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, secgroup_name))
    else:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, sg_id))
    os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)

    return instance


def generate_ping_userdata(ips_array, ping_count=10):
    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    return ("#!/bin/sh\n"
            "set%s\n"
            "while true; do\n"
            " for i do\n"
            "  ip=$i\n"
            "  ping -c %s $ip >/dev/null 2>&1\n"
            "  RES=$?\n"
            "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            "   echo ping $ip OK\n"
            "  else echo ping $ip KO\n"
            "  fi\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % (ips, ping_count))

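# For reference, generate_ping_userdata(['10.0.0.2'], ping_count=5)
# expands to roughly this cloud-init script (sketch):
#
#   #!/bin/sh
#   set 10.0.0.2
#   while true; do
#    for i do
#     ip=$i
#     ping -c 5 $ip >/dev/null 2>&1 && echo ping $ip OK || echo ping $ip KO
#    done
#    sleep 1
#   done
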

def generate_userdata_common():
    return ("#!/bin/sh\n"
            "sudo mkdir -p /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/id_rsa\n"
            "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
            "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
            "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
            "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
            "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
            "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
            "chmod 700 /home/cirros/.ssh\n"
            "chmod 644 /home/cirros/.ssh/authorized_keys\n"
            "chmod 600 /home/cirros/.ssh/id_rsa\n"
            )


def generate_userdata_with_ssh(ips_array):
    u1 = generate_userdata_common()

    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    u2 = ("#!/bin/sh\n"
          "set%s\n"
          "while true; do\n"
          " for i do\n"
          "  ip=$i\n"
          "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
          "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
          "  RES=$?\n"
          "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
          "  else echo $ip 'not reachable';fi;\n"
          " done\n"
          " sleep 1\n"
          "done\n"
          % ips)
    return (u1 + u2)


def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_Address, net_mask):
    return ("#!/bin/sh\n"
            "set -xe\n"
            "sudo useradd -m sdnvpn\n"
            "sudo adduser sdnvpn sudo\n"
            "sudo echo sdnvpn:opnfv | chpasswd\n"
            "sleep 20\n"
            "sudo ifconfig %s:%s %s netmask %s up\n"
            % (interface_name, interface_number,
               ip_Address, net_mask))


def get_installerHandler():
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type not in ["fuel", "apex"]:
        logger.warning("Installer type %s is neither fuel nor apex. "
                       "Returning None for installer handler."
                       % installer_type)
        return None
    else:
        if installer_type in ["apex"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                pkey_file="/root/.ssh/id_rsa")

        if installer_type in ["fuel"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                'r00tme')
        return developHandler

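# The installer helpers above read their configuration from the
# environment; both variables must be set before calling
# get_installerHandler(). Values below are hypothetical:
#
#   export INSTALLER_TYPE=apex
#   export INSTALLER_IP=192.0.2.10
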

def get_nodes():
    developHandler = get_installerHandler()
    return developHandler.get_nodes()


def get_installer_ip():
    return str(os.environ['INSTALLER_IP'])


def get_instance_ip(conn, instance):
    # list() is needed because dict.values() is not subscriptable in
    # Python 3.
    instance_ip = list(conn.compute.get_server(instance).
                       addresses.values())[0][0]['addr']
    return instance_ip


def wait_for_instance(instance, pattern=".* login:", tries=40):
    logger.info("Waiting for instance %s to boot up" % instance.id)
    conn = os_utils.get_os_connection()
    sleep_time = 2
    expected_regex = re.compile(pattern)
    console_log = ""
    while tries > 0 and not expected_regex.search(console_log):
        console_log = conn.compute.\
            get_server_console_output(instance)['output']
        time.sleep(sleep_time)
        tries -= 1

    if not expected_regex.search(console_log):
        logger.error("Instance %s does not boot up properly."
                     % instance.id)
        return False
    return True

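# Illustrative usage (not executed): block until a VM reaches the login
# prompt, or until it obtains a DHCP lease; 'vm' is hypothetical:
#
#   wait_for_instance(vm)                          # default ".* login:"
#   wait_for_instance(vm, "Lease of .* obtained")  # DHCP completed
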

def wait_for_instances_up(*instances):
    check = [wait_for_instance(instance) for instance in instances]
    return all(check)


def wait_for_instances_get_dhcp(*instances):
    check = [wait_for_instance(instance, "Lease of .* obtained")
             for instance in instances]
    return all(check)


def async_Wait_for_instances(instances, tries=40):
    if len(instances) <= 0:
        return
    futures = []
    for instance in instances:
        future = executor.submit(wait_for_instance,
                                 instance,
                                 ".* login:",
                                 tries)
        futures.append(future)
    results = []
    for future in futures:
        results.append(future.result())
    if False in results:
        logger.error("One or more instances are not yet booted up")


def wait_for_instance_delete(conn, instance_id, tries=30):
    sleep_time = 2
    instances = [instance_id]
    logger.debug("Waiting for instance %s to be deleted"
                 % (instance_id))
    while tries > 0 and instance_id in instances:
        instances = [instance.id for instance in
                     os_utils.get_instances(conn)]
        time.sleep(sleep_time)
        tries -= 1
    if instance_id in instances:
        logger.error("Deletion of instance %s failed" %
                     (instance_id))


def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    tries = 30
    sleep_time = 1
    nets = []
    logger.debug("Waiting for network %s to associate with BGPVPN %s"
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    tries = 30
    sleep_time = 1
    routers = []
    logger.debug("Waiting for router %s to associate with BGPVPN %s"
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
        TODO: Replace delay with polling logic. '''
    time.sleep(30)


def assert_and_get_compute_nodes(conn, required_node_number=2):
    """Get the compute nodes in the deployment
    Exit if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(conn)

    num_compute_nodes = len(compute_nodes)
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        raise Exception("There are {} compute nodes in the deployment. "
                        "Minimum number of nodes to complete the test"
                        " is {}.".format(num_compute_nodes,
                                         required_node_number))

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes


def open_icmp(conn, security_group_id):
    if os_utils.check_security_group_rules(conn,
                                           security_group_id,
                                           'ingress',
                                           'icmp'):

        if not os_utils.create_secgroup_rule(conn,
                                             security_group_id,
                                             'ingress',
                                             'icmp'):
            logger.error("Failed to create icmp security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def open_http_port(conn, security_group_id):
    if os_utils.check_security_group_rules(conn,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           80, 80):

        if not os_utils.create_secgroup_rule(conn,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             80, 80):

            logger.error("Failed to create http security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def open_bgp_port(conn, security_group_id):
    if os_utils.check_security_group_rules(conn,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           179, 179):

        if not os_utils.create_secgroup_rule(conn,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             179, 179):
            logger.error("Failed to create bgp security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def exec_cmd(cmd, verbose):
    success = True
    logger.debug("Executing '%s'" % cmd)
    # universal_newlines=True makes stdout a text stream on both
    # Python 2 and 3, so the lines can be concatenated to a str.
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         universal_newlines=True)
    output = ""
    for line in iter(p.stdout.readline, ''):
        output += line

    if verbose:
        logger.debug(output)

    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success


def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL FIB for `ip`"""
    url = "http://" + controller_ip + \
          ":8181/restconf/config/odl-fib:fibEntries/"
    logger.debug("Querying '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        return False
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text

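# A rough manual equivalent of the RESTCONF query issued by
# check_odl_fib() (sketch; the module-level admin/admin credentials and a
# placeholder controller address are assumed):
#
#   curl -u admin:admin \
#       http://<controller_ip>:8181/restconf/config/odl-fib:fibEntries/
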

def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell
    This is a bit flimsy because of shell quote escaping; make sure that
    the cmd passed does not have any top-level double quotes, or this
    function will break.
    The /dev/null is used because the client works, but outputs something
    that contains "ERROR", and run_cmd doesn't like that.
    '''
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)

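# Illustrative usage (not executed): Karaf shell queries of the kind this
# helper is used for elsewhere in the suite; 'odl_node' is hypothetical
# and, per the docstring above, the command must not contain top-level
# double quotes:
#
#   run_odl_cmd(odl_node, 'fib-show')
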

def wait_for_cloud_init(conn, instance):
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
    while tries > 0:
        instance_log = conn.compute.\
            get_server_console_output(instance)['output']
        if "Failed to run module" in instance_log:
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            success = True
            break
        time.sleep(sleep_time)
        tries = tries - 1

    if tries == 0:
        logger.error("Cloud init timed out. Reason: %s",
                     instance_log)
        success = False
    logger.info("Finished waiting for cloud init of instance {};"
                " result was {}".format(instance.name, success))
    return success


def attach_instance_to_ext_br(instance, compute_node):
    libvirt_instance_name = instance.instance_name
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an OVS bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux bridge.
        bridge = "br-quagga"
        cmd = """
        set -e
        if ! sudo brctl show |grep -q ^{bridge};then
          sudo brctl addbr {bridge}
          sudo ip link set {bridge} up
          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
          sudo ip link set dev ovs-quagga-tap up
          sudo ip link set dev quagga-tap up
          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
          sudo brctl addif {bridge} quagga-tap
        fi
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))

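# The Apex workaround above results in roughly this topology (sketch):
#
#   instance NIC (virsh) -- br-quagga (linux bridge)
#                              |
#                           quagga-tap <-veth-> ovs-quagga-tap
#                                                  |
#                                               br-ex (OVS bridge)
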

def detach_instance_from_ext_br(instance, compute_node):
    libvirt_instance_name = instance.instance_name
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               r"grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an OVS bridge, so the linux bridge and veth
        # pair created by attach_instance_to_ext_br() have to be torn
        # down again here.
        bridge = "br-quagga"
        cmd = """
            sudo brctl delif {bridge} quagga-tap &&
            sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
            sudo ip link set dev quagga-tap down &&
            sudo ip link set dev ovs-quagga-tap down &&
            sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
            sudo ip link set {bridge} down &&
            sudo brctl delbr {bridge}
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))


def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
                    interfaces, subnet_ids, router_ids, network_ids):
    if len(floatingip_ids) != 0:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(conn, floatingip_id):
                logger.error('Failed to delete all floating IPs. '
                             'Floating IP with id {} was not deleted.'.
                             format(floatingip_id))
                return False

    if len(bgpvpn_ids) != 0:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)

    if len(interfaces) != 0:
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(conn,
                                                    router_id, subnet_id):
                logger.error('Failed to delete all router interfaces. '
                             'Router interface with id {} was not deleted.'.
                             format(router_id))

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(conn, router_id):
                logger.error('Failed to delete all gateway routers. '
                             'Gateway router with id {} was not deleted.'.
                             format(router_id))

    if len(subnet_ids) != 0:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(conn, subnet_id):
                logger.error('Failed to delete all subnets. '
                             'Subnet with id {} was not deleted.'.
                             format(subnet_id))
                return False

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(conn, router_id):
                logger.error('Failed to delete all routers. '
                             'Router with id {} was not deleted.'.
                             format(router_id))
                return False

    if len(network_ids) != 0:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(conn, network_id):
                logger.error('Failed to delete all networks. '
                             'Network with id {} was not deleted.'.
                             format(network_id))
                return False
    return True


def cleanup_nova(conn, instance_ids, flavor_ids=None):
    if flavor_ids is not None and len(flavor_ids) != 0:
        for flavor_id in flavor_ids:
            conn.compute.delete_flavor(flavor_id)
    if len(instance_ids) != 0:
        for instance_id in instance_ids:
            if not os_utils.delete_instance(conn, instance_id):
                logger.error('Failed to delete all instances. '
                             'Instance with id {} was not deleted.'.
                             format(instance_id))
            else:
                wait_for_instance_delete(conn, instance_id)
    return True


def cleanup_glance(conn, image_ids):
    if len(image_ids) != 0:
        for image_id in image_ids:
            if not os_utils.delete_glance_image(conn, image_id):
                logger.error('Failed to delete all images. '
                             'Image with id {} was not deleted.'.
                             format(image_id))
                return False
    return True


def create_bgpvpn(neutron_client, **kwargs):
    # kwargs are passed through as the bgpvpn body,
    # e.g. route_distinguishers and route_targets.
    json_body = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(json_body)


def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    json_body = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, json_body)


def delete_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.delete_bgpvpn(bgpvpn_id)


def get_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.show_bgpvpn(bgpvpn_id)


def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']


def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']


def create_router_association(neutron_client, bgpvpn_id, router_id):
    json_body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, json_body)


def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    json_body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, json_body)


def is_fail_mode_secure():
    """
    Checks whether the fail_mode attribute is set to 'secure' on the
    OVS br-int bridge of every active OpenStack node.
    """
    is_secure = {}
    openstack_nodes = get_nodes()
    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
                       "grep -i bridge | "
                       "awk '{print $2}'")
    # Define OVS get fail_mode command
    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in openstack_nodes:
        if not openstack_node.is_active():
            continue

        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
                        strip().split('\n'))
        if 'br-int' in ovs_int_list:
            # Execute get fail_mode command
            br_int_fail_mode = (openstack_node.
                                run_cmd(get_ovs_fail_mode_cmd).strip())
            if br_int_fail_mode == 'secure':
                # success
                is_secure[openstack_node.name] = True
            else:
                # failure
                logger.error('The fail_mode for br-int was not secure '
                             'on node {}'.format(openstack_node.name))
                is_secure[openstack_node.name] = False
    return is_secure

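# For reference, the fail_mode checked above can be inspected or set by
# hand on any node:
#
#   sudo ovs-vsctl get-fail-mode br-int
#   sudo ovs-vsctl set-fail-mode br-int secure
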

def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    try:
        conn.network.update_quota(tenant_id, networks=nw_quota,
                                  subnets=subnet_quota, ports=port_quota,
                                  routers=router_quota)
        return True
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(network,"
                     " '%s', '%s', '%s', '%s', '%s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
        return False


def update_instance_quota_class(cloud, instances_quota):
    try:
        cloud.set_compute_quotas('admin', instances=instances_quota)
        return True
    except Exception as e:
        logger.error("Error [update_instance_quota_class(compute,"
                     " '%s')]: %s" % (instances_quota, e))
        return False


def get_neutron_quota(conn, tenant_id):
    try:
        return conn.network.get_quota(tenant_id)
    except ResourceNotFound as e:
        logger.error("Error in getting network quota for tenant"
                     " '%s': %s" % (tenant_id, e))
        raise


def get_nova_instances_quota(cloud):
    try:
        return cloud.get_compute_quotas('admin').instances
    except Exception as e:
        logger.error("Error in getting nova instances quota: %s" % e)
        raise


def update_router_extra_route(conn, router_id, extra_routes):
    if len(extra_routes) <= 0:
        return
    routes_list = []
    for extra_route in extra_routes:
        route_dict = {'destination': extra_route.destination,
                      'nexthop': extra_route.nexthop}
        routes_list.append(route_dict)

    try:
        conn.network.update_router(router_id, routes=routes_list)
        return True
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
        raise

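# Illustrative usage (not executed): route traffic for a prefix via a
# nexthop inside the tenant network; the values are hypothetical:
#
#   routes = [ExtraRoute('30.0.0.0/24', '10.0.0.5')]
#   update_router_extra_route(conn, router_id, routes)
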

def update_router_no_extra_route(conn, router_ids):
    # Clear the routes on every router before returning, rather than
    # returning after the first one.
    for router_id in router_ids:
        try:
            conn.network.update_router(router_id, routes=[])
        except Exception as e:
            logger.error("Error in clearing extra route: %s" % e)
    return True

def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Takes a list of compute nodes and a list of OVS bridges and returns,
    as a list of lines, the console output containing all the OVS groups
    from all the bridges and nodes in those lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                                  "grep group".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
                                  split("\n"))
    return cmd_out_lines


def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Takes a list of compute nodes and a list of OVS bridges and returns,
    as a list of lines, the console output containing all the OVS flows
    from all the bridges and nodes in those lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
                                 "grep table=".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
                                  split("\n"))
    return cmd_out_lines

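# A line returned by get_ovs_flows() looks roughly like this (sketch;
# field values are hypothetical):
#
#   cookie=0x8000001, duration=42s, table=17, n_packets=0, priority=5,...
#
# so callers typically scan these lines for specific table ids or actions.
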

def get_node_ip_and_netmask(node, iface):
    cmd = "ip a | grep {iface} | grep inet | awk '{{print $2}}'"\
          .format(iface=iface)
    mgmt_net_cidr = node.run_cmd(cmd).strip().split('\n')
    mgmt_ip = mgmt_net_cidr[0].split('/')[0]
    mgmt_netmask = mgmt_net_cidr[0].split('/')[1]

    return mgmt_ip, mgmt_netmask


def get_odl_bgp_entity_owner(controllers):
    """ Finds the ODL owner of the BGP entity in the cluster.

    When ODL runs in clustering mode, the BGP speaker related commands
    must be executed on the ODL instance which is the owner of the BGP
    entity.

    :param controllers: list of OS controllers
    :return controller: OS controller in which ODL BGP entity owner runs
    """
    if len(controllers) == 1:
        return controllers[0]
    else:
        installer_type = str(os.environ['INSTALLER_TYPE'].lower())
        if installer_type in ['fuel']:
            ip, _ = get_node_ip_and_netmask(controllers[0], 'br-ctl')
            port = 8282
            odl_pass = 'admin'
        else:
            ip = controllers[0].ip
            port = 8081
            odl_pass = os.environ['SDN_CONTROLLER_PASSWORD']
        url = ('http://{user}:{password}@{ip}:{port}/restconf/'
               'operational/entity-owners:entity-owners/entity-type/bgp'
               .format(user='admin', password=odl_pass, ip=ip, port=port))

        remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
                                'initial/akka.conf')
        remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
        local_tmp_akka_conf = '/tmp/akka.conf'
        try:
            json_output = requests.get(url).json()
        except Exception:
            logger.error('Failed to find the ODL BGP '
                         'entity owner through REST')
            return None
        odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']

        get_odl_id_cmd = "sudo docker ps -qf name=opendaylight_api"
        for controller in controllers:
            odl_id = controller.run_cmd(get_odl_id_cmd)
            controller.run_cmd('sudo docker cp {container_id}:{odl_akka_conf} '
                               '/home/heat-admin/'
                               .format(container_id=odl_id,
                                       odl_akka_conf=remote_odl_akka_conf))
            controller.run_cmd('sudo chmod 777 {0}'
                               .format(remote_odl_home_akka_conf))
            controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)

            for line in open(local_tmp_akka_conf):
                if re.search(odl_bgp_owner, line):
                    return controller
        return None

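# The entity-owners reply parsed above has roughly this shape (sketch;
# the member name is hypothetical):
#
#   {"entity-type": [{"type": "bgp",
#                     "entity": [{"id": "...", "owner": "member-2"}]}]}
#
# The owner string is then matched against each controller's akka.conf to
# find the node it runs on.
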

def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
    json_body = {'input':
                 {'destination-ip': remote_tep_ip,
                  'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
                 }
    url = ('http://{ip}:8081/restconf/operations/'
           'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    try:
        requests.post(url, data=json.dumps(json_body),
                      headers=headers,
                      auth=HTTPBasicAuth('admin', 'admin'))
    except Exception as e:
        logger.error("Failed to create external tunnel endpoint on"
                     " ODL for external tep ip %s with error %s"
                     % (remote_tep_ip, e))
    return None

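# A rough curl equivalent of the RPC issued above (sketch; admin/admin
# credentials and a placeholder controller address/TEP IP are assumed):
#
#   curl -u admin:admin -X POST \
#     -H 'Content-Type: application/yang.data+json' \
#     -d '{"input": {"destination-ip": "<tep_ip>",
#          "tunnel-type": "odl-interface:tunnel-type-mpls-over-gre"}}' \
#     http://<ip>:8081/restconf/operations/itm-rpc:add-external-tunnel-endpoint
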

def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
    url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
           'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
    logger.debug("url is %s" % url)
    try:
        vrf_table = requests.get(url).json()
        is_ipprefix_exists = False
        for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
            if vrf_entry['destPrefix'] == ip_prefix:
                is_ipprefix_exists = True
                break
        return is_ipprefix_exists
    except Exception as e:
        logger.error('Failed to find ip prefix %s with error %s'
                     % (ip_prefix, e))
    return False
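

# Illustrative usage (not executed): check that a learned route reached
# the ODL FIB for a given VRF/route distinguisher; values are
# hypothetical:
#
#   if is_fib_entry_present_on_odl(controllers, '30.1.1.1/32', '100:100'):
#       logger.info('FIB entry found')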