Replace glance client calls with openstack sdk
sdnvpn.git: sdnvpn/lib/utils.py
#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import json
import logging
import os
import re
import subprocess
import time
from concurrent.futures import ThreadPoolExecutor

import requests
from requests.auth import HTTPBasicAuth

from opnfv.deployment.factory import Factory as DeploymentFactory

from sdnvpn.lib import config as sdnvpn_config
import sdnvpn.lib.openstack_utils as os_utils

logger = logging.getLogger('sdnvpn_test_utils')

common_config = sdnvpn_config.CommonConfig()

ODL_USER = 'admin'
ODL_PASS = 'admin'

executor = ThreadPoolExecutor(5)


class ExtraRoute(object):
    """
    Class to represent extra route for a router
    """

    def __init__(self, destination, nexthop):
        self.destination = destination
        self.nexthop = nexthop


class AllowedAddressPair(object):
    """
    Class to represent allowed address pair for a neutron port
    """

    def __init__(self, ipaddress, macaddress):
        self.ipaddress = ipaddress
        self.macaddress = macaddress


def create_default_flavor():
    return os_utils.get_or_create_flavor(common_config.default_flavor,
                                         common_config.default_flavor_ram,
                                         common_config.default_flavor_disk,
                                         common_config.default_flavor_vcpus)


def create_custom_flavor():
    return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
                                         common_config.custom_flavor_ram,
                                         common_config.custom_flavor_disk,
                                         common_config.custom_flavor_vcpus)


def create_net(neutron_client, name):
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(neutron_client, name)
    if not net_id:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(name))
    return net_id


def create_subnet(neutron_client, name, cidr, net_id):
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(neutron_client,
                                               name,
                                               cidr,
                                               net_id)
    if not subnet_id:
        logger.error(
            "There has been a problem when creating the neutron subnet")
        raise Exception("There has been a problem when creating"
                        " the neutron subnet {}".format(name))
    return subnet_id


def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
    network_dic = os_utils.create_network_full(neutron_client,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(net))
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            neutron_client, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem when creating the second subnet")
            raise Exception("There has been a problem when creating"
                            " the second subnet {}".format(subnet2))
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id


def get_port(neutron_client, instance_id):
    ports = os_utils.get_port_list(neutron_client)
    if ports is not None:
        for port in ports:
            if port['device_id'] == instance_id:
                return port
    return None


def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
    if len(address_pairs) <= 0:
        return
    allowed_address_pairs = []
    for address_pair in address_pairs:
        address_pair_dict = {'ip_address': address_pair.ipaddress,
                             'mac_address': address_pair.macaddress}
        allowed_address_pairs.append(address_pair_dict)
    json_body = {'port': {
        "allowed_address_pairs": allowed_address_pairs
    }}

    try:
        port = neutron_client.update_port(port=port_id,
                                          body=json_body)
        return port['port']['id']
    except Exception as e:
        logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
                     " %s" % (port_id, address_pairs, e))
        return None
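
# Illustrative usage sketch (not executed on import; the neutron client,
# instance and the IP/MAC values are assumptions for the example):
#
#     port = get_port(neutron_client, instance.id)
#     pairs = [AllowedAddressPair('10.10.10.5', 'fa:16:3e:aa:bb:cc')]
#     update_port_allowed_address_pairs(neutron_client, port['id'], pairs)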


def create_instance(nova_client,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node='',
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        raise Exception("Error while booting instance {}".format(name))
    else:
        logger.debug("Instance '%s' booted successfully. IP='%s'." %
                     (name, next(iter(instance.networks.values()))[0]))
    # Retrieve IP of INSTANCE
    # instance_ip = instance.networks.get(network_id)[0]

    if secgroup_name:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, secgroup_name))
    else:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, sg_id))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)

    return instance
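
# Illustrative usage sketch (not executed on import; nova_client, image_id,
# net_id and sg_id are assumptions, and 'nova:compute-0' is a placeholder
# availability-zone string pinning the VM to one compute node):
#
#     userdata = generate_ping_userdata(['10.10.10.5'])
#     vm = create_instance(nova_client, 'sdnvpn-vm-1', image_id, net_id,
#                          sg_id, secgroup_name='sdnvpn-sg',
#                          compute_node='nova:compute-0', userdata=userdata)
#     wait_for_instances_up(vm)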


def generate_ping_userdata(ips_array, ping_count=10):
    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    return ("#!/bin/sh\n"
            "set%s\n"
            "while true; do\n"
            " for i do\n"
            "  ip=$i\n"
            "  ping -c %s $ip 2>&1 >/dev/null\n"
            "  RES=$?\n"
            "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            "   echo ping $ip OK\n"
            "  else echo ping $ip KO\n"
            "  fi\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % (ips, ping_count))


def generate_userdata_common():
    return ("#!/bin/sh\n"
            "sudo mkdir -p /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/id_rsa\n"
            "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
            "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
            "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
            "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
            "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
            "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
            "chmod 700 /home/cirros/.ssh\n"
            "chmod 644 /home/cirros/.ssh/authorized_keys\n"
            "chmod 600 /home/cirros/.ssh/id_rsa\n"
            )


def generate_userdata_with_ssh(ips_array):
    u1 = generate_userdata_common()

    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    u2 = ("#!/bin/sh\n"
          "set%s\n"
          "while true; do\n"
          " for i do\n"
          "  ip=$i\n"
          "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
          "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
          "  RES=$?\n"
          "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
          "  else echo $ip 'not reachable';fi;\n"
          " done\n"
          " sleep 1\n"
          "done\n"
          % ips)
    return (u1 + u2)


def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_Address, net_mask):
    return ("#!/bin/sh\n"
            "set -xe\n"
            "sudo useradd -m sdnvpn\n"
            "sudo adduser sdnvpn sudo\n"
            "echo sdnvpn:opnfv | sudo chpasswd\n"
            "sleep 20\n"
            "sudo ifconfig %s:%s %s netmask %s up\n"
            % (interface_name, interface_number,
               ip_Address, net_mask))


def get_installerHandler():
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type not in ["fuel", "apex"]:
        logger.warning("Installer type %s is neither fuel nor apex. "
                       "Returning None for installer handler"
                       % installer_type)
        return None
    else:
        if installer_type in ["apex"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                pkey_file="/root/.ssh/id_rsa")

        if installer_type in ["fuel"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                'r00tme')
        return developHandler


def get_nodes():
    developHandler = get_installerHandler()
    return developHandler.get_nodes()


def get_installer_ip():
    return str(os.environ['INSTALLER_IP'])


def get_instance_ip(instance):
    instance_ip = next(iter(instance.networks.values()))[0]
    return instance_ip


def wait_for_instance(instance, pattern=".* login:", tries=40):
    logger.info("Waiting for instance %s to boot up" % instance.id)
    sleep_time = 2
    expected_regex = re.compile(pattern)
    console_log = ""
    while tries > 0 and not expected_regex.search(console_log):
        console_log = instance.get_console_output()
        time.sleep(sleep_time)
        tries -= 1

    if not expected_regex.search(console_log):
        logger.error("Instance %s does not boot up properly."
                     % instance.id)
        return False
    return True


def wait_for_instances_up(*instances):
    check = [wait_for_instance(instance) for instance in instances]
    return all(check)


def wait_for_instances_get_dhcp(*instances):
    check = [wait_for_instance(instance, "Lease of .* obtained")
             for instance in instances]
    return all(check)


def async_Wait_for_instances(instances, tries=40):
    if len(instances) <= 0:
        return
    futures = []
    for instance in instances:
        future = executor.submit(wait_for_instance,
                                 instance,
                                 ".* login:",
                                 tries)
        futures.append(future)
    results = []
    for future in futures:
        results.append(future.result())
    if False in results:
        logger.error("One or more instances are not booted up yet")


def wait_for_instance_delete(nova_client, instance_id, tries=30):
    sleep_time = 2
    instances = [instance_id]
    logger.debug("Waiting for instance %s to be deleted"
                 % (instance_id))
    while tries > 0 and instance_id in instances:
        instances = [instance.id for instance in
                     os_utils.get_instances(nova_client)]
        time.sleep(sleep_time)
        tries -= 1
    if instance_id in instances:
        logger.error("Deletion of instance %s failed" %
                     (instance_id))


def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    tries = 30
    sleep_time = 1
    nets = []
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    tries = 30
    sleep_time = 1
    routers = []
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
        TODO: Replace delay with polling logic. '''
    time.sleep(30)


def assert_and_get_compute_nodes(nova_client, required_node_number=2):
    """Get the compute nodes in the deployment
    Exit if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(nova_client)

    num_compute_nodes = len(compute_nodes)
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        raise Exception("There are {} compute nodes in the deployment. "
                        "Minimum number of nodes to complete the test"
                        " is {}.".format(num_compute_nodes,
                                         required_node_number))

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes
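
# Illustrative sketch: the returned hypervisor names are typically turned
# into per-host availability zones for create_instance(); the 'nova:' prefix
# pins a VM to a specific compute node:
#
#     computes = assert_and_get_compute_nodes(nova_client)
#     av_zone_1 = "nova:" + computes[0]
#     av_zone_2 = "nova:" + computes[1]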


def open_icmp(neutron_client, security_group_id):
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'icmp'):

        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'icmp'):
            logger.error("Failed to create icmp security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def open_http_port(neutron_client, security_group_id):
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           80, 80):

        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             80, 80):

            logger.error("Failed to create http security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def open_bgp_port(neutron_client, security_group_id):
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           179, 179):

        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             179, 179):
            logger.error("Failed to create bgp security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def exec_cmd(cmd, verbose):
    success = True
    logger.debug("Executing '%s'" % cmd)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         universal_newlines=True)
    output = ""
    for line in iter(p.stdout.readline, ''):
        output += line

    if verbose:
        logger.debug(output)

    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success
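
# Illustrative usage sketch (the command is only an example):
#
#     output, success = exec_cmd('sudo ovs-vsctl show', verbose=True)
#     if not success:
#         logger.error("Could not read the OVS configuration")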


def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL FIB for `ip`"""
    url = "http://" + controller_ip + \
          ":8181/restconf/config/odl-fib:fibEntries/"
    logger.debug("Querying '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        return False
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text
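
# Illustrative usage sketch (the prefix and controller IP are placeholders):
#
#     if not check_odl_fib('10.10.10.0', '192.168.0.2'):
#         logger.error("Missing FIB entry on the OpenDaylight controller")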


def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell
    This is a bit flimsy because of shell quote escaping, make sure that
    the cmd passed does not have any top level double quotes or this
    function will break.
    The /dev/null is used because client works, but outputs something
    that contains "ERROR" and run_cmd doesn't like that.
    '''
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)


def wait_for_cloud_init(instance):
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
    while tries > 0:
        instance_log = instance.get_console_output()
        if "Failed to run module" in instance_log:
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            success = True
            break
        time.sleep(sleep_time)
        tries = tries - 1

    if tries == 0:
        logger.error("Cloud init timed out. Reason: %s",
                     instance_log)
        success = False
    logger.info("Finished waiting for cloud init of instance {} result was {}"
                "".format(instance.name, success))
    return success


def attach_instance_to_ext_br(instance, compute_node):
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        set -e
        if ! sudo brctl show |grep -q ^{bridge};then
          sudo brctl addbr {bridge}
          sudo ip link set {bridge} up
          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
          sudo ip link set dev ovs-quagga-tap up
          sudo ip link set dev quagga-tap up
          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
          sudo brctl addif {bridge} quagga-tap
        fi
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))


def detach_instance_from_ext_br(instance, compute_node):
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               "grep -oP '52:54:[\\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, the instance was attached to a linux bridge that
        # attach_instance_to_ext_br() wired to br-ex with a veth pair,
        # so tear down the veth pair and the linux bridge again
        bridge = "br-quagga"
        cmd = """
            sudo brctl delif {bridge} quagga-tap &&
            sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
            sudo ip link set dev quagga-tap down &&
            sudo ip link set dev ovs-quagga-tap down &&
            sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
            sudo ip link set {bridge} down &&
            sudo brctl delbr {bridge}
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))


def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
                    subnet_ids, router_ids, network_ids):

    if len(floatingip_ids) != 0:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
                logger.error('Failed to delete all floating IPs. '
                             'Floating IP with id {} was not deleted.'.
                             format(floatingip_id))
                return False

    if len(bgpvpn_ids) != 0:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)

    if len(interfaces) != 0:
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(neutron_client,
                                                    router_id, subnet_id):
                logger.error('Failed to delete all router interfaces. '
                             'Router interface with id {} was not deleted.'.
                             format(router_id))

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(neutron_client, router_id):
                logger.error('Failed to delete all gateway routers. '
                             'Gateway router with id {} was not deleted.'.
                             format(router_id))

    if len(subnet_ids) != 0:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
                logger.error('Failed to delete all subnets. '
                             'Subnet with id {} was not deleted.'.
                             format(subnet_id))
                return False

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(neutron_client, router_id):
                logger.error('Failed to delete all routers. '
                             'Router with id {} was not deleted.'.
                             format(router_id))
                return False

    if len(network_ids) != 0:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(neutron_client, network_id):
                logger.error('Failed to delete all networks. '
                             'Network with id {} was not deleted.'.
                             format(network_id))
                return False
    return True


def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
    if flavor_ids is not None and len(flavor_ids) != 0:
        for flavor_id in flavor_ids:
            nova_client.flavors.delete(flavor_id)
    if len(instance_ids) != 0:
        for instance_id in instance_ids:
            if not os_utils.delete_instance(nova_client, instance_id):
                logger.error('Failed to delete all instances. '
                             'Instance with id {} was not deleted.'.
                             format(instance_id))
            else:
                wait_for_instance_delete(nova_client, instance_id)
    return True


def cleanup_glance(conn, image_ids):
    if len(image_ids) != 0:
        for image_id in image_ids:
            if not os_utils.delete_glance_image(conn, image_id):
                logger.error('Failed to delete all images. '
                             'Image with id {} was not deleted.'.
                             format(image_id))
                return False
    return True
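
# Illustrative teardown-order sketch: instances are removed first so their
# ports are freed, then images, then the neutron resources (all the ID lists
# are assumed to have been collected by the test case):
#
#     cleanup_nova(nova_client, instance_ids)
#     cleanup_glance(conn, image_ids)
#     cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
#                     interfaces, subnet_ids, router_ids, network_ids)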


def create_bgpvpn(neutron_client, **kwargs):
    # route_distinguishers
    # route_targets
    json_body = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(json_body)


def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    json_body = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, json_body)


def delete_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.delete_bgpvpn(bgpvpn_id)


def get_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.show_bgpvpn(bgpvpn_id)


def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']


def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']


def create_router_association(neutron_client, bgpvpn_id, router_id):
    json_body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, json_body)


def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    json_body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, json_body)
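
# Illustrative BGPVPN lifecycle sketch (the name and route target values are
# placeholders; net_id is assumed to exist):
#
#     bgpvpn = create_bgpvpn(neutron_client, name='sdnvpn-1',
#                            route_targets='88:88')
#     bgpvpn_id = bgpvpn['bgpvpn']['id']
#     create_network_association(neutron_client, bgpvpn_id, net_id)
#     wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id)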


def is_fail_mode_secure():
    """
    Checks the value of the attribute fail_mode,
    if it is set to secure. This check is performed
    on all OVS br-int interfaces, for all OpenStack nodes.
    """
    is_secure = {}
    openstack_nodes = get_nodes()
    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
                       "grep -i bridge | "
                       "awk '{print $2}'")
    # Define OVS get fail_mode command
    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in openstack_nodes:
        if not openstack_node.is_active():
            continue

        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
                        strip().split('\n'))
        if 'br-int' in ovs_int_list:
            # Execute get fail_mode command
            br_int_fail_mode = (openstack_node.
                                run_cmd(get_ovs_fail_mode_cmd).strip())
            if br_int_fail_mode == 'secure':
                # success
                is_secure[openstack_node.name] = True
            else:
                # failure
                logger.error('The fail_mode for br-int was not secure '
                             'on node {}'.format(openstack_node.name))
                is_secure[openstack_node.name] = False
    return is_secure


def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    json_body = {"quota": {
        "network": nw_quota,
        "subnet": subnet_quota,
        "port": port_quota,
        "router": router_quota
    }}

    try:
        neutron_client.update_quota(tenant_id=tenant_id,
                                    body=json_body)
        return True
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
                     " '%s', '%s', '%s', '%s', '%s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
        return False


def update_instance_quota_class(nova_client, instances_quota):
    try:
        nova_client.quota_classes.update("default", instances=instances_quota)
        return True
    except Exception as e:
        logger.error("Error [update_instance_quota_class(nova_client,"
                     " '%s')]: %s" % (instances_quota, e))
        return False


def get_neutron_quota(neutron_client, tenant_id):
    try:
        return neutron_client.show_quota(tenant_id=tenant_id)['quota']
    except Exception as e:
        logger.error("Error in getting neutron quota for tenant"
                     " '%s': %s" % (tenant_id, e))
        raise


def get_nova_instances_quota(nova_client):
    try:
        return nova_client.quota_classes.get("default").instances
    except Exception as e:
        logger.error("Error in getting nova instances quota: %s" % e)
        raise


def update_router_extra_route(neutron_client, router_id, extra_routes):
    if len(extra_routes) <= 0:
        return
    routes_list = []
    for extra_route in extra_routes:
        route_dict = {'destination': extra_route.destination,
                      'nexthop': extra_route.nexthop}
        routes_list.append(route_dict)
    json_body = {'router': {
        "routes": routes_list
    }}

    try:
        neutron_client.update_router(router_id, body=json_body)
        return True
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
        raise
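
# Illustrative usage sketch (the destination prefix and nexthop are
# placeholders; router_id is assumed to exist):
#
#     routes = [ExtraRoute('10.20.30.0/24', '10.10.10.5')]
#     update_router_extra_route(neutron_client, router_id, routes)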


def update_router_no_extra_route(neutron_client, router_ids):
    json_body = {'router': {
        "routes": [
        ]}}

    for router_id in router_ids:
        try:
            neutron_client.update_router(router_id, body=json_body)
        except Exception as e:
            logger.error("Error in clearing extra route: %s" % e)
    return True


def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Takes a list of compute nodes and a list of OVS bridges and
    returns, as a list of lines, the console output containing
    the OVS groups of every bridge on every node in the lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                                  "grep group".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
                                  split("\n"))
    return cmd_out_lines


def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Takes a list of compute nodes and a list of OVS bridges and
    returns, as a list of lines, the console output containing
    the OVS flows of every bridge on every node in the lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
                                 "grep table=".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
                                  split("\n"))
    return cmd_out_lines
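
# Illustrative usage sketch: dump the br-int flows of every compute node and
# look for tunnel traffic (the 'tun_id' match is only an example):
#
#     flows = get_ovs_flows(compute_nodes, ['br-int'])
#     tunnel_flows = [flow for flow in flows if 'tun_id' in flow]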


def get_odl_bgp_entity_owner(controllers):
    """ Finds the ODL owner of the BGP entity in the cluster.

    When ODL runs in clustering mode, the BGP speaker related commands
    must be executed on the ODL instance that owns the BGP entity.

    :param controllers: list of OS controllers
    :return controller: OS controller in which ODL BGP entity owner runs
    """
    if len(controllers) == 1:
        return controllers[0]
    else:
        url = ('http://admin:admin@{ip}:8081/restconf/'
               'operational/entity-owners:entity-owners/entity-type/bgp'
               .format(ip=controllers[0].ip))

        remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
                                'initial/akka.conf')
        remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
        local_tmp_akka_conf = '/tmp/akka.conf'
        try:
            json_output = requests.get(url).json()
        except Exception:
            logger.error('Failed to find the ODL BGP '
                         'entity owner through REST')
            return None
        odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']

        for controller in controllers:

            controller.run_cmd('sudo cp {0} /home/heat-admin/'
                               .format(remote_odl_akka_conf))
            controller.run_cmd('sudo chmod 777 {0}'
                               .format(remote_odl_home_akka_conf))
            controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)

            with open(local_tmp_akka_conf) as akka_conf:
                for line in akka_conf:
                    if re.search(odl_bgp_owner, line):
                        return controller
        return None


def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
    json_body = {'input':
                 {'destination-ip': remote_tep_ip,
                  'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
                 }
    url = ('http://{ip}:8081/restconf/operations/'
           'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    try:
        requests.post(url, data=json.dumps(json_body),
                      headers=headers,
                      auth=HTTPBasicAuth('admin', 'admin'))
    except Exception as e:
        logger.error("Failed to create external tunnel endpoint on"
                     " ODL for external tep ip %s with error %s"
                     % (remote_tep_ip, e))
    return None


def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
    url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
           'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
    logger.debug("Querying '%s' for the FIB entry", url)
    try:
        vrf_table = requests.get(url).json()
        is_ipprefix_exists = False
        for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
            if vrf_entry['destPrefix'] == ip_prefix:
                is_ipprefix_exists = True
                break
        return is_ipprefix_exists
    except Exception as e:
        logger.error('Failed to find ip prefix %s with error %s'
                     % (ip_prefix, e))
    return False
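
# Illustrative usage sketch (the prefix and VRF id are placeholders; nodes
# exposing is_controller() come from the OPNFV deployment handler):
#
#     controllers = [node for node in get_nodes() if node.is_controller()]
#     if is_fib_entry_present_on_odl(controllers, '10.10.10.0/24', '100:100'):
#         logger.info('FIB entry is present on ODL')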