fix for sdnvpn CI test failure
[sdnvpn.git] / sdnvpn / lib / utils.py
1 #!/usr/bin/python
2 #
3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 import logging
11 import os
12 import sys
13 import time
14 import requests
15 import re
16 import subprocess
17 from concurrent.futures import ThreadPoolExecutor
18
19 import functest.utils.openstack_utils as os_utils
20 from opnfv.deployment.factory import Factory as DeploymentFactory
21
22 from sdnvpn.lib import config as sdnvpn_config
23
24 logger = logging.getLogger('sdnvpn_test_utils')
25
26 common_config = sdnvpn_config.CommonConfig()
27
28 ODL_USER = 'admin'
29 ODL_PASS = 'admin'
30
31 executor = ThreadPoolExecutor(5)
32
33
class ExtraRoute(object):
    """
    Class to represent extra route for a router

    Plain value holder passed to update_router_extra_route(), which reads
    the `destination` and `nexthop` attributes.
    """
    def __init__(self, destination, nexthop):
        # destination: CIDR string of the routed subnet
        self.destination = destination
        # nexthop: IP address of the next hop for that destination
        self.nexthop = nexthop
41
42
class AllowedAddressPair(object):
    """
    Class to represent allowed address pair for a neutron port

    Plain value holder passed to update_port_allowed_address_pairs(),
    which reads the `ipaddress` and `macaddress` attributes.
    """
    def __init__(self, ipaddress, macaddress):
        # ipaddress: IP address allowed on the port
        self.ipaddress = ipaddress
        # macaddress: MAC address paired with that IP
        self.macaddress = macaddress
50
51
def create_default_flavor():
    """Get or create the default test flavor from common_config settings.

    Returns whatever os_utils.get_or_create_flavor returns.
    """
    return os_utils.get_or_create_flavor(common_config.default_flavor,
                                         common_config.default_flavor_ram,
                                         common_config.default_flavor_disk,
                                         common_config.default_flavor_vcpus)
57
58
def create_custom_flavor():
    """Get or create the custom test flavor from common_config settings.

    Returns whatever os_utils.get_or_create_flavor returns.
    """
    return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
                                         common_config.custom_flavor_ram,
                                         common_config.custom_flavor_disk,
                                         common_config.custom_flavor_vcpus)
64
65
def create_net(neutron_client, name):
    """Create a neutron network and return its id.

    Exits the process with -1 when creation fails (test-abort semantics).
    """
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(neutron_client, name)
    if not net_id:
        logger.error(
            "There has been a problem when creating the neutron network")
        sys.exit(-1)
    return net_id
74
75
def create_subnet(neutron_client, name, cidr, net_id):
    """Create a neutron subnet in network `net_id` and return its id.

    Exits the process with -1 when creation fails (test-abort semantics).
    """
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(neutron_client,
                                               name,
                                               cidr,
                                               net_id)
    if not subnet_id:
        logger.error(
            "There has been a problem when creating the neutron subnet")
        sys.exit(-1)
    return subnet_id
88
89
def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962

    Creates a network with one subnet attached to `router`; optionally
    creates and attaches a second subnet.  Exits the process with -1 on
    any creation failure.

    :return: tuple (net_id, subnet_id, router_id); subnet_id is the
             second subnet's id when subnet2 is given.
    """
    network_dic = os_utils.create_network_full(neutron_client,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        sys.exit(-1)
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        # NOTE: overwrites subnet_id, so callers get the second subnet's id
        subnet_id = os_utils.create_neutron_subnet(
            neutron_client, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem when creating the second subnet")
            sys.exit(-1)
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id
118
119
def get_port(neutron_client, instance_id):
    """Return the neutron port whose device_id matches `instance_id`.

    :return: the first matching port dict, or None when the port list is
             unavailable or no port matches.
    """
    ports = os_utils.get_port_list(neutron_client)
    if ports is None:
        return None
    return next((p for p in ports if p['device_id'] == instance_id), None)
127
128
def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
    """Set allowed-address-pairs on a neutron port.

    :param address_pairs: iterable of objects exposing `ipaddress` and
                          `macaddress` (e.g. AllowedAddressPair); a no-op
                          when empty.
    :return: the updated port id on success, None on failure or no-op.
    """
    if not address_pairs:
        return
    pairs = [{'ip_address': pair.ipaddress,
              'mac_address': pair.macaddress}
             for pair in address_pairs]
    payload = {'port': {
        "allowed_address_pairs": pairs
    }}

    try:
        updated = neutron_client.update_port(port=port_id,
                                             body=payload)
        return updated['port']['id']
    except Exception as e:
        logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
                     " %s" % (port_id, address_pairs, e))
        return None
149
150
def create_instance(nova_client,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node='',
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
    """Boot a nova instance and attach it to security group `sg_id`.

    Uses common_config.default_flavor unless kwargs['flavor'] is given.
    Exits the process with -1 when the boot fails.

    :param secgroup_name: only used for nicer logging; the group actually
                          added is always `sg_id`.
    :return: the created nova server object.
    """
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        sys.exit(-1)
    else:
        # NOTE(review): itervalues().next() is Python-2-only; see
        # get_instance_ip for the py2/py3-compatible form.
        logger.debug("Instance '%s' booted successfully. IP='%s'." %
                     (name, instance.networks.itervalues().next()[0]))
    # Retrieve IP of INSTANCE
    # instance_ip = instance.networks.get(network_id)[0]

    if secgroup_name:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, secgroup_name))
    else:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, sg_id))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)

    return instance
202
203
def generate_ping_userdata(ips_array, ping_count=10):
    """Build a cloud-init shell script that endlessly pings each IP.

    :param ips_array: iterable of IP address strings
    :param ping_count: echo requests per ping invocation
    :return: the userdata script as one string
    """
    ip_args = ""
    for address in ips_array:
        ip_args = "%s %s" % (ip_args, address)
    ip_args = ip_args.replace('  ', ' ')

    script = [
        "#!/bin/sh",
        # `set <ips>` loads the addresses as positional parameters
        "set%s" % ip_args,
        "while true; do",
        " for i do",
        "  ip=$i",
        "  ping -c %s $ip 2>&1 >/dev/null" % ping_count,
        "  RES=$?",
        "  if [ \"Z$RES\" = \"Z0\" ] ; then",
        "   echo ping $ip OK",
        "  else echo ping $ip KO",
        "  fi",
        " done",
        " sleep 1",
        "done",
    ]
    return "\n".join(script) + "\n"
225
226
def generate_userdata_common():
    """Build the common cirros userdata: install an SSH keypair for the
    cirros user and fix ownership/permissions of ~/.ssh.

    :return: the userdata script as one string
    """
    script = [
        "#!/bin/sh",
        "sudo mkdir -p /home/cirros/.ssh/",
        "sudo chown cirros:cirros /home/cirros/.ssh/",
        "sudo chown cirros:cirros /home/cirros/id_rsa",
        "mv /home/cirros/id_rsa /home/cirros/.ssh/",
        ("sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
         "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
         "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
         "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
         "cirros@test1>/home/cirros/.ssh/authorized_keys"),
        "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys",
        "chmod 700 /home/cirros/.ssh",
        "chmod 644 /home/cirros/.ssh/authorized_keys",
        "chmod 600 /home/cirros/.ssh/id_rsa",
    ]
    return "\n".join(script) + "\n"
243
244
def generate_userdata_with_ssh(ips_array):
    """Build userdata that installs the common SSH setup and then loops
    forever ssh-ing to each IP in `ips_array`, printing its hostname (or
    'not reachable').

    :return: the userdata script as one string
    """
    common_part = generate_userdata_common()

    ip_args = ""
    for address in ips_array:
        ip_args = "%s %s" % (ip_args, address)
    ip_args = ip_args.replace('  ', ' ')

    ssh_loop = ("#!/bin/sh\n"
                "set%s\n"
                "while true; do\n"
                " for i do\n"
                "  ip=$i\n"
                "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
                "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
                "  RES=$?\n"
                "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
                "  else echo $ip 'not reachable';fi;\n"
                " done\n"
                " sleep 1\n"
                "done\n"
                % ip_args)
    return common_part + ssh_loop
268
269
def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_Address, net_mask):
    """Build userdata that creates an 'sdnvpn' sudo user and brings up an
    alias interface (e.g. eth0:1) with the given address/netmask.

    :return: the userdata script as one string
    """
    script = [
        "#!/bin/sh",
        "set -xe",
        "sudo useradd -m sdnvpn",
        "sudo adduser sdnvpn sudo",
        "sudo echo sdnvpn:opnfv | chpasswd",
        "sleep 20",
        "sudo ifconfig %s:%s %s netmask %s up" % (interface_name,
                                                  interface_number,
                                                  ip_Address,
                                                  net_mask),
    ]
    return "\n".join(script) + "\n"
281
282
def get_installerHandler():
    """Return a deployment handler for the current installer.

    Reads INSTALLER_TYPE (and, via get_installer_ip, INSTALLER_IP) from
    the environment.  Only 'fuel' and 'apex' are supported; returns None
    for anything else.
    """
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type not in ["fuel", "apex"]:
        logger.warn("installer type %s is neither fuel nor apex."
                    "returning None for installer handler" % installer_type)
        return None
    else:
        if installer_type in ["apex"]:
            # apex installers are reached with key-based root ssh
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                pkey_file="/root/.ssh/id_rsa")

        if installer_type in ["fuel"]:
            # fuel installers use the default root password
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                'r00tme')
        return developHandler
306
307
def get_nodes():
    """Return the node list from the current installer's handler.

    NOTE(review): raises AttributeError when get_installerHandler()
    returns None (unsupported installer type).
    """
    developHandler = get_installerHandler()
    return developHandler.get_nodes()
311
312
def get_installer_ip():
    """Return the installer IP taken from the INSTALLER_IP env variable."""
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
315
316
def get_instance_ip(instance):
    """Return the first IP of the instance's first network.

    Fix: dict.itervalues().next() exists only on Python 2; the
    next(iter(...)) form behaves identically on both Python 2 and 3.

    :param instance: nova server object whose `networks` maps network
                     name -> list of IPs
    :return: the first IP address string
    """
    instance_ip = next(iter(instance.networks.values()))[0]
    return instance_ip
320
321
def wait_for_instance(instance, pattern=".* login:", tries=40):
    """Poll an instance's console log until `pattern` appears.

    Polls every 2 seconds, up to `tries` times (default ~80s).

    :param pattern: regex searched for in the console output; the default
                    matches a login prompt.
    :return: True when the pattern showed up, False on timeout.
    """
    logger.info("Waiting for instance %s to boot up" % instance.id)
    sleep_time = 2
    expected_regex = re.compile(pattern)
    console_log = ""
    while tries > 0 and not expected_regex.search(console_log):
        console_log = instance.get_console_output()
        time.sleep(sleep_time)
        tries -= 1

    if not expected_regex.search(console_log):
        logger.error("Instance %s does not boot up properly."
                     % instance.id)
        return False
    return True
337
338
def wait_for_instances_up(*instances):
    """Wait for every instance to reach its login prompt.

    :return: True iff all instances booted.
    """
    results = []
    for vm in instances:
        results.append(wait_for_instance(vm))
    return all(results)
342
343
def wait_for_instances_get_dhcp(*instances):
    """Wait until every instance logs a DHCP lease acquisition.

    :return: True iff all instances obtained a lease.
    """
    results = []
    for vm in instances:
        results.append(wait_for_instance(vm, "Lease of .* obtained"))
    return all(results)
348
349
def async_Wait_for_instances(instances, tries=40):
    """Wait concurrently (via the module's thread pool) for all instances
    to reach their login prompt; log an error if any did not.

    :param instances: list of nova server objects; no-op when empty.
    """
    if not instances:
        return
    futures = [executor.submit(wait_for_instance, vm, ".* login:", tries)
               for vm in instances]
    results = [future.result() for future in futures]
    if not all(results):
        logger.error("one or more instances is not yet booted up")
365
366
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    """Poll (30 x 1s) until `net_id` shows up in the BGPVPN's networks.

    Fix: the initial debug message passed (bgpvpn_id, net_id) to a format
    string expecting (net_id, bgpvpn_id), so ids were logged swapped.

    :return: True when associated within the timeout, False otherwise.
    """
    tries = 30
    sleep_time = 1
    nets = []
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True
383
384
def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for every network id in *args to associate with the BGPVPN.

    :return: True iff all associations succeeded.
    """
    results = []
    for net_id in args:
        results.append(wait_for_bgp_net_assoc(neutron_client, bgpvpn_id,
                                              net_id))
    return all(results)
390
391
def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    """Poll (30 x 1s) until `router_id` shows up in the BGPVPN's routers.

    Fix: the initial debug message passed (bgpvpn_id, router_id) to a
    format string expecting (router_id, bgpvpn_id), so ids were logged
    swapped.

    :return: True when associated within the timeout, False otherwise.
    """
    tries = 30
    sleep_time = 1
    routers = []
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True
407
408
def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for every router id in *args to associate with the BGPVPN.

    :return: True iff all associations succeeded.
    """
    results = []
    for router_id in args:
        results.append(wait_for_bgp_router_assoc(neutron_client, bgpvpn_id,
                                                 router_id))
    return all(results)
414
415
def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
        TODO: Replace delay with polling logic.
        All arguments are accepted and ignored; the call just sleeps
        30 seconds between subtests. '''
    time.sleep(30)
420
421
def assert_and_get_compute_nodes(nova_client, required_node_number=2):
    """Get the compute nodes in the deployment
    Exit if the deployment doesn't have enough compute nodes

    Fix: the required_node_number parameter was previously ignored and
    the threshold hard-coded to 2; it is now honoured (default still 2,
    so existing callers are unaffected).

    :return: the hypervisor list when the deployment is large enough.
    """
    compute_nodes = os_utils.get_hypervisors(nova_client)

    num_compute_nodes = len(compute_nodes)
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        sys.exit(-1)

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes
436
437
def open_icmp(neutron_client, security_group_id):
    """Ensure an ingress ICMP rule exists in the security group.

    NOTE(review): this relies on os_utils.check_security_group_rules
    returning True when the rule is absent (i.e. safe to create) --
    confirm against functest's openstack_utils.
    """
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'icmp'):

        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'icmp'):
            logger.error("Failed to create icmp security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
452
453
def open_http_port(neutron_client, security_group_id):
    """Ensure an ingress TCP/80 (HTTP) rule exists in the security group.

    NOTE(review): relies on os_utils.check_security_group_rules returning
    True when the rule is absent -- confirm against functest's
    openstack_utils.
    """
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           80, 80):

        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             80, 80):

            logger.error("Failed to create http security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
471
472
def open_bgp_port(neutron_client, security_group_id):
    """Ensure an ingress TCP/179 (BGP) rule exists in the security group.

    NOTE(review): relies on os_utils.check_security_group_rules returning
    True when the rule is absent -- confirm against functest's
    openstack_utils.
    """
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           179, 179):

        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             179, 179):
            logger.error("Failed to create bgp security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
489
490
def exec_cmd(cmd, verbose):
    """Run `cmd` in a shell, capturing combined stdout/stderr.

    Fix: stdout was iterated as bytes (sentinel b'') and concatenated to
    a str accumulator, which raises TypeError on Python 3.  Opening the
    pipe with universal_newlines=True yields text on both Python 2 and 3.

    :param cmd: shell command string (executed with shell=True)
    :param verbose: when True, log the captured output at debug level
    :return: tuple (output, success) where success is False when the
             command exited non-zero.
    """
    success = True
    logger.debug("Executing '%s'" % cmd)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         universal_newlines=True)
    output = ""
    for line in iter(p.stdout.readline, ''):
        output += line

    if verbose:
        logger.debug(output)

    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success
510
511
def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL Fib for `ip`

    Queries the controller's restconf FIB dump over HTTP (port 8181,
    basic auth with the module's ODL credentials).

    :param ip: substring (an IP) searched for in the FIB text
    :param controller_ip: host running the OpenDaylight restconf API
    :return: True when the request succeeds (HTTP 200) and `ip` occurs
             in the response body, False otherwise.
    """
    url = "http://" + controller_ip + \
          ":8181/restconf/config/odl-fib:fibEntries/"
    logger.debug("Querring '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        return False
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % controller_ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text
525
526
def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell
    This is a bit flimsy because of shell quote escaping, make sure that
    the cmd passed does not have any top level double quotes or this
    function will break.
    The /dev/null is used because client works, but outputs something
    that contains "ERROR" and run_cmd doesn't like that.

    :param odl_node: node object exposing run_cmd()
    :param cmd: Karaf shell command (no top-level double quotes)
    :return: whatever odl_node.run_cmd returns for the wrapped command
    '''
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)
538
539
def wait_for_cloud_init(instance):
    """Poll an instance's console log until cloud-init finishes.

    Polls every 30 seconds, up to 20 times (~10 minutes), looking for the
    'Cloud-init v. ... finished at' line; aborts early with failure when a
    'Failed to run module' line appears.

    :return: True when cloud-init completed, False on module failure or
             timeout.
    """
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
    while tries > 0:
        instance_log = instance.get_console_output()
        if "Failed to run module" in instance_log:
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            success = True
            break
        time.sleep(sleep_time)
        tries = tries - 1

    if tries == 0:
        logger.error("Cloud init timed out"
                     ". Reason: %s",
                     instance_log)
        success = False
    logger.info("Finished waiting for cloud init of instance {} result was {}"
                "".format(instance.name, success))
    return success
568
569
def attach_instance_to_ext_br(instance, compute_node):
    """Attach a running instance to the external bridge via virsh.

    On fuel the instance is attached straight to br-ex; on apex a linux
    bridge (br-quagga) is first created and veth-linked to the OVS br-ex.

    NOTE(review): `bridge` is only assigned for fuel/apex -- any other
    INSTALLER_TYPE would raise NameError at the final run_cmd.
    """
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        set -e
        if ! sudo brctl show |grep -q ^{bridge};then
          sudo brctl addbr {bridge}
          sudo ip link set {bridge} up
          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
          sudo ip link set dev ovs-quagga-tap up
          sudo ip link set dev quagga-tap up
          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
          sudo brctl addif {bridge} quagga-tap
        fi
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))
597
598
def detach_instance_from_ext_br(instance, compute_node):
    """Detach an instance from the external bridge and, on apex, tear
    down the br-quagga/veth plumbing created by attach_instance_to_ext_br.

    NOTE(review): the mac lookup greps 52:54:* MACs from EVERY running
    libvirt domain on the node, not just this instance -- verify this is
    safe when several VMs run on the same compute node.
    """
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               "grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
            sudo brctl delif {bridge} quagga-tap &&
            sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
            sudo ip link set dev quagga-tap down &&
            sudo ip link set dev ovs-quagga-tap down &&
            sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
            sudo ip link set {bridge} down &&
            sudo brctl delbr {bridge}
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))
628
629
def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
                    subnet_ids, router_ids, network_ids):
    """Delete neutron resources in dependency-safe order.

    Order: floating IPs, BGPVPNs, router interfaces, router gateways,
    subnets, routers, networks.

    :param interfaces: iterable of (router_id, subnet_id) pairs
    :return: False as soon as a floating ip, subnet, router or network
             fails to delete; True otherwise.  Interface and gateway
             removal failures are logged but do not abort the cleanup.
    """

    if len(floatingip_ids) != 0:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
                logging.error('Fail to delete all floating ips. '
                              'Floating ip with id {} was not deleted.'.
                              format(floatingip_id))
                return False

    if len(bgpvpn_ids) != 0:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)

    if len(interfaces) != 0:
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(neutron_client,
                                                    router_id, subnet_id):
                logging.error('Fail to delete all interface routers. '
                              'Interface router with id {} was not deleted.'.
                              format(router_id))

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(neutron_client, router_id):
                logging.error('Fail to delete all gateway routers. '
                              'Gateway router with id {} was not deleted.'.
                              format(router_id))

    if len(subnet_ids) != 0:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
                logging.error('Fail to delete all subnets. '
                              'Subnet with id {} was not deleted.'.
                              format(subnet_id))
                return False

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(neutron_client, router_id):
                logging.error('Fail to delete all routers. '
                              'Router with id {} was not deleted.'.
                              format(router_id))
                return False

    if len(network_ids) != 0:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(neutron_client, network_id):
                logging.error('Fail to delete all networks. '
                              'Network with id {} was not deleted.'.
                              format(network_id))
                return False
    return True
684
685
def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
    """Delete nova flavors (best-effort) and instances.

    :return: False as soon as an instance fails to delete; True
             otherwise.  Flavor deletion failures are only logged.
    """
    if flavor_ids is not None and len(flavor_ids) != 0:
        for flavor_id in flavor_ids:
            if not nova_client.flavors.delete(flavor_id):
                logging.error('Fail to delete flavor. '
                              'Flavor with id {} was not deleted.'.
                              format(flavor_id))
    if len(instance_ids) != 0:
        for instance_id in instance_ids:
            if not os_utils.delete_instance(nova_client, instance_id):
                logging.error('Fail to delete all instances. '
                              'Instance with id {} was not deleted.'.
                              format(instance_id))
                return False
    return True
701
702
def cleanup_glance(glance_client, image_ids):
    """Delete every glance image in `image_ids`.

    :return: False on the first failed deletion, True otherwise.
    """
    for image_id in image_ids:
        if not os_utils.delete_glance_image(glance_client, image_id):
            logging.error('Fail to delete all images. '
                          'Image with id {} was not deleted.'.
                          format(image_id))
            return False
    return True
712
713
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN from keyword attributes.

    Typical keys include route_distinguishers and route_targets; the
    kwargs are passed through verbatim as the 'bgpvpn' request body.
    """
    request = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(request)
719
720
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update BGPVPN `bgpvpn_id`; kwargs become the 'bgpvpn' body."""
    request = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, request)
724
725
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN identified by `bgpvpn_id`."""
    result = neutron_client.delete_bgpvpn(bgpvpn_id)
    return result
728
729
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Return the full show_bgpvpn payload for `bgpvpn_id`."""
    details = neutron_client.show_bgpvpn(bgpvpn_id)
    return details
732
733
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the router id list associated with `bgpvpn_id`."""
    details = get_bgpvpn(neutron_client, bgpvpn_id)
    return details['bgpvpn']['routers']
736
737
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the network id list associated with `bgpvpn_id`."""
    details = get_bgpvpn(neutron_client, bgpvpn_id)
    return details['bgpvpn']['networks']
740
741
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate router `router_id` with BGPVPN `bgpvpn_id`."""
    request = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, request)
745
746
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate network `neutron_network_id` with BGPVPN `bgpvpn_id`."""
    request = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, request)
750
751
def is_fail_mode_secure():
    """
    Checks the value of the attribute fail_mode,
    if it is set to secure. This check is performed
    on all OVS br-int interfaces, for all OpenStack nodes.

    :return: dict mapping node name -> True/False (True when that node's
             br-int fail_mode is 'secure'); inactive nodes and nodes
             without br-int are omitted.
    """
    is_secure = {}
    openstack_nodes = get_nodes()
    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
                       "grep -i bridge | "
                       "awk '{print $2}'")
    # Define OVS get fail_mode command
    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in openstack_nodes:
        if not openstack_node.is_active():
            continue

        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
                        strip().split('\n'))
        if 'br-int' in ovs_int_list:
            # Execute get fail_mode command
            br_int_fail_mode = (openstack_node.
                                run_cmd(get_ovs_fail_mode_cmd).strip())
            if br_int_fail_mode == 'secure':
                # success
                is_secure[openstack_node.name] = True
            else:
                # failure
                logging.error('The fail_mode for br-int was not secure '
                              'in {} node'.format(openstack_node.name))
                is_secure[openstack_node.name] = False
    return is_secure
784
785
def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    """Set network/subnet/port/router quotas for a tenant.

    :return: True on success, False when the quota update raises.
    """
    quota = {"network": nw_quota,
             "subnet": subnet_quota,
             "port": port_quota,
             "router": router_quota}

    try:
        neutron_client.update_quota(tenant_id=tenant_id,
                                    body={"quota": quota})
        return True
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
                     " '%s', '%s', '%s', '%s, %s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
        return False
805
806
def update_instance_quota_class(nova_client, instances_quota):
    """Set the 'default' quota class' instance limit.

    :return: True on success, False when the update raises.
    """
    try:
        nova_client.quota_classes.update("default",
                                         instances=instances_quota)
        return True
    except Exception as e:
        logger.error("Error [update_instance_quota_class(nova_client,"
                     " '%s' )]: %s" % (instances_quota, e))
        return False
815
816
def get_neutron_quota(neutron_client, tenant_id):
    """Return the tenant's neutron quota dict; re-raise on failure."""
    try:
        quota_info = neutron_client.show_quota(tenant_id=tenant_id)
        return quota_info['quota']
    except Exception as e:
        logger.error("Error in getting neutron quota for tenant "
                     " '%s' )]: %s" % (tenant_id, e))
        raise
824
825
def get_nova_instances_quota(nova_client):
    """Return the 'default' quota class' instance limit; re-raise on
    failure."""
    try:
        default_quotas = nova_client.quota_classes.get("default")
        return default_quotas.instances
    except Exception as e:
        logger.error("Error in getting nova instances quota: %s" % e)
        raise
832
833
def update_router_extra_route(neutron_client, router_id, extra_routes):
    """Set extra routes on a router.

    :param extra_routes: iterable of objects exposing `destination` and
                         `nexthop` (e.g. ExtraRoute); a no-op when empty.
    :return: True on success, None on no-op; re-raises on failure.
    """
    if not extra_routes:
        return
    routes = [{'destination': route.destination,
               'nexthop': route.nexthop}
              for route in extra_routes]
    payload = {'router': {
        "routes": routes
    }}

    try:
        neutron_client.update_router(router_id, body=payload)
        return True
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
        raise
852
853
def update_router_no_extra_route(neutron_client, router_ids):
    """Clear the extra routes of every router in `router_ids`.

    Fix: the original returned True from inside the loop after updating
    only the first router, leaving the remaining routers untouched.

    :return: True when every router was updated; False when any update
             raised (falsy, like the original's implicit None).
    """
    json_body = {'router': {
        "routes": [
        ]}}

    success = True
    for router_id in router_ids:
        try:
            neutron_client.update_router(router_id, body=json_body)
        except Exception as e:
            logger.error("Error in clearing extra route: %s" % e)
            success = False
    return success
865
866
def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS groups from all bridges and nodes in lists.
    """
    collected = []
    for node in compute_node_list:
        for bridge in ovs_br_list:
            # only query bridges that actually exist on this node
            if bridge not in node.run_cmd("sudo ovs-vsctl show"):
                continue
            dump_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                        "grep group".format(bridge, of_protocol))
            collected += node.run_cmd(dump_cmd).strip().split("\n")
    return collected
882
883
def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS flows from all bridges and nodes in lists.
    """
    collected = []
    for node in compute_node_list:
        for bridge in ovs_br_list:
            # only query bridges that actually exist on this node
            if bridge not in node.run_cmd("sudo ovs-vsctl show"):
                continue
            dump_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
                        "grep table=".format(bridge, of_protocol))
            collected += node.run_cmd(dump_cmd).strip().split("\n")
    return collected
898     return cmd_out_lines