Merge "Route exchange test with testcase3"
[sdnvpn.git] / sdnvpn / lib / utils.py
#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import json
import logging
import os
import re
import subprocess
import time

import requests
from concurrent.futures import ThreadPoolExecutor
from requests.auth import HTTPBasicAuth

from opnfv.deployment.factory import Factory as DeploymentFactory

from sdnvpn.lib import config as sdnvpn_config
import sdnvpn.lib.openstack_utils as os_utils

logger = logging.getLogger('sdnvpn_test_utils')

common_config = sdnvpn_config.CommonConfig()

ODL_USER = 'admin'
ODL_PASS = 'admin'

executor = ThreadPoolExecutor(5)


class ExtraRoute(object):
    """
    Class to represent an extra route for a router
    """

    def __init__(self, destination, nexthop):
        self.destination = destination
        self.nexthop = nexthop


class AllowedAddressPair(object):
    """
    Class to represent an allowed address pair for a neutron port
    """

    def __init__(self, ipaddress, macaddress):
        self.ipaddress = ipaddress
        self.macaddress = macaddress


def create_default_flavor():
    return os_utils.get_or_create_flavor(common_config.default_flavor,
                                         common_config.default_flavor_ram,
                                         common_config.default_flavor_disk,
                                         common_config.default_flavor_vcpus)


def create_custom_flavor():
    return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
                                         common_config.custom_flavor_ram,
                                         common_config.custom_flavor_disk,
                                         common_config.custom_flavor_vcpus)


def create_net(neutron_client, name):
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(neutron_client, name)
    if not net_id:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(name))
    return net_id


def create_subnet(neutron_client, name, cidr, net_id):
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(neutron_client,
                                               name,
                                               cidr,
                                               net_id)
    if not subnet_id:
        logger.error(
            "There has been a problem when creating the neutron subnet")
        raise Exception("There has been a problem when creating"
                        " the neutron subnet {}".format(name))
    return subnet_id


def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
    network_dic = os_utils.create_network_full(neutron_client,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(net))
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            neutron_client, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem when creating the second subnet")
            raise Exception("There has been a problem when creating"
                            " the second subnet {}".format(subnet2))
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id
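# Illustrative usage sketch; names and CIDR are example values, not fixtures
# from this repo:
#   net_id, subnet_id, router_id = create_network(
#       neutron_client, 'example-net', 'example-subnet',
#       '10.10.10.0/24', 'example-router')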


def get_port(neutron_client, instance_id):
    ports = os_utils.get_port_list(neutron_client)
    if ports is not None:
        for port in ports:
            if port['device_id'] == instance_id:
                return port
    return None


def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
    if not address_pairs:
        return
    allowed_address_pairs = []
    for address_pair in address_pairs:
        address_pair_dict = {'ip_address': address_pair.ipaddress,
                             'mac_address': address_pair.macaddress}
        allowed_address_pairs.append(address_pair_dict)
    json_body = {'port': {
        "allowed_address_pairs": allowed_address_pairs
    }}

    try:
        port = neutron_client.update_port(port=port_id,
                                          body=json_body)
        return port['port']['id']
    except Exception as e:
        logger.error("Error [update_port_allowed_address_pairs("
                     "neutron_client, '%s', '%s')]: %s"
                     % (port_id, address_pairs, e))
        return None
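# Sketch of expected usage; the addresses and port_id below are illustrative:
#   pairs = [AllowedAddressPair('10.10.10.253', 'fa:16:3e:00:00:01')]
#   update_port_allowed_address_pairs(neutron_client, port_id, pairs)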


def create_instance(nova_client,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node='',
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        raise Exception("Error while booting instance {}".format(name))
    else:
        logger.debug("Instance '%s' booted successfully. IP='%s'." %
                     (name, instance.networks.itervalues().next()[0]))
    # Retrieve IP of INSTANCE
    # instance_ip = instance.networks.get(network_id)[0]

    logger.debug("Adding '%s' to security group '%s'..."
                 % (name, secgroup_name if secgroup_name else sg_id))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)

    return instance


def generate_ping_userdata(ips_array, ping_count=10):
    # Load the IPs as positional parameters of the generated script;
    # the bare 'for i do' below iterates over them.
    ips = ' '.join(ips_array)
    return ("#!/bin/sh\n"
            "set %s\n"
            "while true; do\n"
            " for i do\n"
            "  ip=$i\n"
            "  ping -c %s $ip 2>&1 >/dev/null\n"
            "  RES=$?\n"
            "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            "   echo ping $ip OK\n"
            "  else echo ping $ip KO\n"
            "  fi\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % (ips, ping_count))
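# For reference, generate_ping_userdata(['10.0.0.1', '10.0.0.2']) renders a
# script that starts with:
#   #!/bin/sh
#   set 10.0.0.1 10.0.0.2
#   while true; do
#    for i do
# i.e. the IPs become the positional parameters that the loop pings forever.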


def generate_userdata_common():
    return ("#!/bin/sh\n"
            "sudo mkdir -p /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/id_rsa\n"
            "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
            "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
            "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
            "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
            "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
            "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
            "chmod 700 /home/cirros/.ssh\n"
            "chmod 644 /home/cirros/.ssh/authorized_keys\n"
            "chmod 600 /home/cirros/.ssh/id_rsa\n"
            )


def generate_userdata_with_ssh(ips_array):
    u1 = generate_userdata_common()

    # As in generate_ping_userdata, the IPs become positional parameters
    # of the generated script.
    ips = ' '.join(ips_array)
    u2 = ("#!/bin/sh\n"
          "set %s\n"
          "while true; do\n"
          " for i do\n"
          "  ip=$i\n"
          "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
          "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
          "  RES=$?\n"
          "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
          "  else echo $ip 'not reachable';fi;\n"
          " done\n"
          " sleep 1\n"
          "done\n"
          % ips)
    return (u1 + u2)


def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_Address, net_mask):
    return ("#!/bin/sh\n"
            "set -xe\n"
            "sudo useradd -m sdnvpn\n"
            "sudo adduser sdnvpn sudo\n"
            "sudo echo sdnvpn:opnfv | chpasswd\n"
            "sleep 20\n"
            "sudo ifconfig %s:%s %s netmask %s up\n"
            % (interface_name, interface_number,
               ip_Address, net_mask))


def get_installerHandler():
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type not in ["fuel", "apex"]:
        logger.warning("Installer type %s is neither fuel nor apex. "
                       "Returning None for installer handler"
                       % installer_type)
        return None
    else:
        if installer_type in ["apex"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                pkey_file="/root/.ssh/id_rsa")

        if installer_type in ["fuel"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                'r00tme')
        return developHandler


def get_nodes():
    developHandler = get_installerHandler()
    return developHandler.get_nodes()


def get_installer_ip():
    return str(os.environ['INSTALLER_IP'])


def get_instance_ip(instance):
    instance_ip = instance.networks.itervalues().next()[0]
    return instance_ip


def wait_for_instance(instance, pattern=".* login:", tries=40):
    logger.info("Waiting for instance %s to boot up" % instance.id)
    sleep_time = 2
    expected_regex = re.compile(pattern)
    console_log = ""
    while tries > 0 and not expected_regex.search(console_log):
        console_log = instance.get_console_output()
        time.sleep(sleep_time)
        tries -= 1

    if not expected_regex.search(console_log):
        logger.error("Instance %s did not boot up properly."
                     % instance.id)
        return False
    return True


def wait_for_instances_up(*instances):
    check = [wait_for_instance(instance) for instance in instances]
    return all(check)


def wait_for_instances_get_dhcp(*instances):
    check = [wait_for_instance(instance, "Lease of .* obtained")
             for instance in instances]
    return all(check)


def async_Wait_for_instances(instances, tries=40):
    if not instances:
        return
    futures = []
    for instance in instances:
        future = executor.submit(wait_for_instance,
                                 instance,
                                 ".* login:",
                                 tries)
        futures.append(future)
    results = []
    for future in futures:
        results.append(future.result())
    if not all(results):
        logger.error("One or more instances are not yet booted up")


def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    tries = 30
    sleep_time = 1
    nets = []
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    tries = 30
    sleep_time = 1
    routers = []
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
        TODO: Replace delay with polling logic. '''
    time.sleep(30)


def assert_and_get_compute_nodes(nova_client, required_node_number=2):
    """Get the compute nodes in the deployment
    Exit if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(nova_client)

    num_compute_nodes = len(compute_nodes)
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        raise Exception("There are {} compute nodes in the deployment. "
                        "Minimum number of nodes to complete the test"
                        " is {}.".format(num_compute_nodes,
                                         required_node_number))

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes


def open_icmp(neutron_client, security_group_id):
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'icmp'):
        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'icmp'):
            logger.error("Failed to create icmp security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)


def open_http_port(neutron_client, security_group_id):
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           80, 80):
        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             80, 80):
            logger.error("Failed to create http security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)


def open_bgp_port(neutron_client, security_group_id):
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           179, 179):
        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             179, 179):
            logger.error("Failed to create bgp security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)


def exec_cmd(cmd, verbose):
    success = True
    logger.debug("Executing '%s'" % cmd)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    output = ""
    for line in iter(p.stdout.readline, b''):
        output += line

    if verbose:
        logger.debug(output)

    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success
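# Example call (the command shown is illustrative):
#   output, success = exec_cmd('ovs-vsctl show', verbose=True)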


def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL FIB for `ip`"""
    url = "http://" + controller_ip + \
          ":8181/restconf/config/odl-fib:fibEntries/"
    logger.debug("Querying '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        return False
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text


def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell
    This is a bit flimsy because of shell quote escaping; make sure that
    the cmd passed does not have any top-level double quotes or this
    function will break.
    The /dev/null is used because the client works but outputs something
    that contains "ERROR", and run_cmd doesn't like that.
    '''
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)
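# Illustrative call; the exact Karaf command depends on the ODL features
# installed ('fib-show' from the netvirt VPN service is an assumption here):
#   fib_output = run_odl_cmd(odl_node, 'fib-show')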


def wait_for_cloud_init(instance):
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
    while tries > 0:
        instance_log = instance.get_console_output()
        if "Failed to run module" in instance_log:
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            success = True
            break
        time.sleep(sleep_time)
        tries = tries - 1

    if tries == 0:
        logger.error("Cloud init timed out. Reason: %s", instance_log)
        success = False
    logger.info("Finished waiting for cloud init of instance {};"
                " result was {}".format(instance.name, success))
    return success


def attach_instance_to_ext_br(instance, compute_node):
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        set -e
        if ! sudo brctl show |grep -q ^{bridge};then
          sudo brctl addbr {bridge}
          sudo ip link set {bridge} up
          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
          sudo ip link set dev ovs-quagga-tap up
          sudo ip link set dev quagga-tap up
          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
          sudo brctl addif {bridge} quagga-tap
        fi
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))


def detach_instance_from_ext_br(instance, compute_node):
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               r"grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
            sudo brctl delif {bridge} quagga-tap &&
            sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
            sudo ip link set dev quagga-tap down &&
            sudo ip link set dev ovs-quagga-tap down &&
            sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
            sudo ip link set {bridge} down &&
            sudo brctl delbr {bridge}
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))


def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
                    subnet_ids, router_ids, network_ids):

    for floatingip_id in floatingip_ids:
        if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
            logger.error('Failed to delete all floating IPs. '
                         'Floating IP with id {} was not deleted.'.
                         format(floatingip_id))
            return False

    for bgpvpn_id in bgpvpn_ids:
        delete_bgpvpn(neutron_client, bgpvpn_id)

    for router_id, subnet_id in interfaces:
        if not os_utils.remove_interface_router(neutron_client,
                                                router_id, subnet_id):
            logger.error('Failed to delete all router interfaces. '
                         'Router interface with id {} was not deleted.'.
                         format(router_id))

    for router_id in router_ids:
        if not os_utils.remove_gateway_router(neutron_client, router_id):
            logger.error('Failed to delete all gateway routers. '
                         'Gateway router with id {} was not deleted.'.
                         format(router_id))

    for subnet_id in subnet_ids:
        if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
            logger.error('Failed to delete all subnets. '
                         'Subnet with id {} was not deleted.'.
                         format(subnet_id))
            return False

    for router_id in router_ids:
        if not os_utils.delete_neutron_router(neutron_client, router_id):
            logger.error('Failed to delete all routers. '
                         'Router with id {} was not deleted.'.
                         format(router_id))
            return False

    for network_id in network_ids:
        if not os_utils.delete_neutron_net(neutron_client, network_id):
            logger.error('Failed to delete all networks. '
                         'Network with id {} was not deleted.'.
                         format(network_id))
            return False
    return True


def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
    if flavor_ids is not None:
        for flavor_id in flavor_ids:
            nova_client.flavors.delete(flavor_id)
    for instance_id in instance_ids:
        if not os_utils.delete_instance(nova_client, instance_id):
            logger.error('Failed to delete all instances. '
                         'Instance with id {} was not deleted.'.
                         format(instance_id))
            return False
    return True


def cleanup_glance(glance_client, image_ids):
    for image_id in image_ids:
        if not os_utils.delete_glance_image(glance_client, image_id):
            logger.error('Failed to delete all images. '
                         'Image with id {} was not deleted.'.
                         format(image_id))
            return False
    return True


def create_bgpvpn(neutron_client, **kwargs):
    # kwargs are passed through to the BGPVPN API, e.g.
    # route_distinguishers
    # route_targets
    json_body = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(json_body)
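# Example kwargs (the values below are illustrative):
#   create_bgpvpn(neutron_client, name='example-bgpvpn',
#                 route_distinguishers=['100:100'],
#                 route_targets=['88:88'])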


def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    json_body = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, json_body)


def delete_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.delete_bgpvpn(bgpvpn_id)


def get_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.show_bgpvpn(bgpvpn_id)


def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']


def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']


def create_router_association(neutron_client, bgpvpn_id, router_id):
    json_body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, json_body)


def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    json_body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, json_body)


def is_fail_mode_secure():
    """
    Check whether the fail_mode attribute of the OVS br-int bridge
    is set to 'secure' on all OpenStack nodes.
    """
    is_secure = {}
    openstack_nodes = get_nodes()
    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
                       "grep -i bridge | "
                       "awk '{print $2}'")
    # Define OVS get fail_mode command
    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in openstack_nodes:
        if not openstack_node.is_active():
            continue

        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
                        strip().split('\n'))
        if 'br-int' in ovs_int_list:
            # Execute get fail_mode command
            br_int_fail_mode = (openstack_node.
                                run_cmd(get_ovs_fail_mode_cmd).strip())
            if br_int_fail_mode == 'secure':
                # success
                is_secure[openstack_node.name] = True
            else:
                # failure
                logger.error('The fail_mode for br-int was not secure '
                             'on node {}'.format(openstack_node.name))
                is_secure[openstack_node.name] = False
    return is_secure
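# The returned dict maps node names to the per-node result, e.g.
# (illustrative): {'controller-0': True, 'compute-0': False}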


def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    json_body = {"quota": {
        "network": nw_quota,
        "subnet": subnet_quota,
        "port": port_quota,
        "router": router_quota
    }}

    try:
        neutron_client.update_quota(tenant_id=tenant_id,
                                    body=json_body)
        return True
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
                     " '%s', '%s', '%s', '%s', '%s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
        return False


def update_instance_quota_class(nova_client, instances_quota):
    try:
        nova_client.quota_classes.update("default", instances=instances_quota)
        return True
    except Exception as e:
        logger.error("Error [update_instance_quota_class(nova_client,"
                     " '%s')]: %s" % (instances_quota, e))
        return False


def get_neutron_quota(neutron_client, tenant_id):
    try:
        return neutron_client.show_quota(tenant_id=tenant_id)['quota']
    except Exception as e:
        logger.error("Error in getting neutron quota for tenant"
                     " '%s': %s" % (tenant_id, e))
        raise


def get_nova_instances_quota(nova_client):
    try:
        return nova_client.quota_classes.get("default").instances
    except Exception as e:
        logger.error("Error in getting nova instances quota: %s" % e)
        raise


def update_router_extra_route(neutron_client, router_id, extra_routes):
    if not extra_routes:
        return
    routes_list = []
    for extra_route in extra_routes:
        route_dict = {'destination': extra_route.destination,
                      'nexthop': extra_route.nexthop}
        routes_list.append(route_dict)
    json_body = {'router': {
        "routes": routes_list
    }}

    try:
        neutron_client.update_router(router_id, body=json_body)
        return True
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
        raise
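# Sketch of expected usage; the prefix and nexthop below are illustrative:
#   routes = [ExtraRoute('10.20.30.0/24', '10.10.10.253')]
#   update_router_extra_route(neutron_client, router_id, routes)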


def update_router_no_extra_route(neutron_client, router_ids):
    json_body = {'router': {
        "routes": [
        ]}}

    for router_id in router_ids:
        try:
            neutron_client.update_router(router_id, body=json_body)
        except Exception as e:
            logger.error("Error in clearing extra route: %s" % e)
    return True


def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Take a list of compute nodes and a list of OVS bridges and return
    the command console output, as a list of lines, containing all the
    OVS groups from all bridges on all nodes in the lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                                  "grep group".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
                                  split("\n"))
    return cmd_out_lines


def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Take a list of compute nodes and a list of OVS bridges and return
    the command console output, as a list of lines, containing all the
    OVS flows from all bridges on all nodes in the lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
                                 "grep table=".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
                                  split("\n"))
    return cmd_out_lines
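# Illustrative call, assuming compute_nodes are node handlers obtained via
# get_nodes() (they must implement run_cmd as used above):
#   flows = get_ovs_flows(compute_nodes, ['br-int'])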


def get_odl_bgp_entity_owner(controllers):
    """ Finds the ODL owner of the BGP entity in the cluster.

    When ODL runs in clustering mode we need to execute the BGP speaker
    related commands on the ODL instance which is the owner of the BGP
    entity.

    :param controllers: list of OS controllers
    :return controller: OS controller in which ODL BGP entity owner runs
    """
    if len(controllers) == 1:
        return controllers[0]
    else:
        url = ('http://admin:admin@{ip}:8081/restconf/'
               'operational/entity-owners:entity-owners/entity-type/bgp'
               .format(ip=controllers[0].ip))

        remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
                                'initial/akka.conf')
        remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
        local_tmp_akka_conf = '/tmp/akka.conf'
        try:
            json_output = requests.get(url).json()
        except Exception:
            logger.error('Failed to find the ODL BGP '
                         'entity owner through REST')
            return None
        odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']

        for controller in controllers:
            controller.run_cmd('sudo cp {0} /home/heat-admin/'
                               .format(remote_odl_akka_conf))
            controller.run_cmd('sudo chmod 777 {0}'
                               .format(remote_odl_home_akka_conf))
            controller.get_file(remote_odl_home_akka_conf,
                                local_tmp_akka_conf)

            with open(local_tmp_akka_conf) as akka_conf:
                for line in akka_conf:
                    if re.search(odl_bgp_owner, line):
                        return controller
        return None


def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
    json_body = {'input':
                 {'destination-ip': remote_tep_ip,
                  'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
                 }
    url = ('http://{ip}:8081/restconf/operations/'
           'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    try:
        requests.post(url, data=json.dumps(json_body),
                      headers=headers,
                      auth=HTTPBasicAuth('admin', 'admin'))
    except Exception as e:
        logger.error("Failed to create external tunnel endpoint on"
                     " ODL for external tep ip %s with error %s"
                     % (remote_tep_ip, e))
    return None


def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
    url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
           'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
    logger.debug("Querying '%s' for the FIB entry" % url)
    try:
        vrf_table = requests.get(url).json()
        is_ipprefix_exists = False
        for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
            if vrf_entry['destPrefix'] == ip_prefix:
                is_ipprefix_exists = True
                break
        return is_ipprefix_exists
    except Exception as e:
        logger.error('Failed to find ip prefix %s with error %s'
                     % (ip_prefix, e))
    return False