aafd69b36a53b4831171fecdee2973f8b798eab9
[sdnvpn.git] / sdnvpn / lib / utils.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 import os
11 import time
12 import requests
13 import re
14 import subprocess
15 from concurrent.futures import ThreadPoolExecutor
16
17 from opnfv.deployment.factory import Factory as DeploymentFactory
18
19 from sdnvpn.lib import config as sdnvpn_config
20 import sdnvpn.lib.openstack_utils as os_utils
21 from sdnvpn.lib import logutil
22
# Module-wide logger shared by all helpers in this file.
logger = logutil.getLogger('sdnvpn_test_utils')

# Test-suite configuration (flavor names/sizes etc.) loaded once at import.
common_config = sdnvpn_config.CommonConfig()

# Default OpenDaylight RESTCONF credentials, used by check_odl_fib().
ODL_USER = 'admin'
ODL_PASS = 'admin'

# Thread pool used by async_Wait_for_instances() to poll several
# instance consoles concurrently.
executor = ThreadPoolExecutor(5)
31
32
class ExtraRoute(object):
    """
    Class to represent extra route for a router.

    Instances are consumed by update_router_extra_route(), which maps
    them onto neutron's {'destination': ..., 'nexthop': ...} route dicts.
    """

    def __init__(self, destination, nexthop):
        # destination: routed destination (neutron 'destination' field)
        self.destination = destination
        # nexthop: address of the next hop (neutron 'nexthop' field)
        self.nexthop = nexthop
41
42
class AllowedAddressPair(object):
    """
    Class to represent allowed address pair for a neutron port.

    Instances are consumed by update_port_allowed_address_pairs(), which
    maps them onto neutron's {'ip_address': ..., 'mac_address': ...} dicts.
    """

    def __init__(self, ipaddress, macaddress):
        # ipaddress: IP to allow through the port ('ip_address' field)
        self.ipaddress = ipaddress
        # macaddress: MAC to allow through the port ('mac_address' field)
        self.macaddress = macaddress
51
52
def create_default_flavor():
    """Get or create the suite's default nova flavor and return it."""
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.default_flavor,
                                         cfg.default_flavor_ram,
                                         cfg.default_flavor_disk,
                                         cfg.default_flavor_vcpus)
58
59
def create_custom_flavor():
    """Get or create the suite's custom nova flavor and return it."""
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.custom_flavor_name,
                                         cfg.custom_flavor_ram,
                                         cfg.custom_flavor_disk,
                                         cfg.custom_flavor_vcpus)
65
66
def create_net(neutron_client, name):
    """Create a neutron network and return its id.

    Raises Exception when the network could not be created.
    """
    logger.debug("Creating network %s", name)
    network_id = os_utils.create_neutron_net(neutron_client, name)
    if network_id:
        return network_id
    logger.error(
        "There has been a problem when creating the neutron network")
    raise Exception("There has been a problem when creating"
                    " the neutron network {}".format(name))
76
77
def create_subnet(neutron_client, name, cidr, net_id):
    """Create a neutron subnet in net_id and return its id.

    Raises Exception when the subnet could not be created.
    """
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(neutron_client,
                                               name,
                                               cidr,
                                               net_id)
    if subnet_id:
        return subnet_id
    logger.error(
        "There has been a problem when creating the neutron subnet")
    raise Exception("There has been a problem when creating"
                    " the neutron subnet {}".format(name))
91
92
def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Create a network + subnet + router, optionally adding a 2nd subnet.

    Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962

    Returns (net_id, subnet_id, router_id); when subnet2 is given,
    subnet_id refers to the second subnet.
    """
    created = os_utils.create_network_full(neutron_client,
                                           net,
                                           subnet1,
                                           router,
                                           cidr1)
    if not created:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(net))

    net_id = created["net_id"]
    subnet_id = created["subnet_id"]
    router_id = created["router_id"]

    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            neutron_client, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem when creating the second subnet")
            raise Exception("There has been a problem when creating"
                            " the second subnet {}".format(subnet2))
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id
123
124
def get_port(neutron_client, instance_id):
    """Return the first neutron port whose device_id is instance_id.

    Returns None when the port list is unavailable or no port matches.
    """
    ports = os_utils.get_port_list(neutron_client)
    if ports is None:
        return None
    return next((port for port in ports
                 if port['device_id'] == instance_id), None)
132
133
def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
    """Set the allowed-address-pairs attribute of a neutron port.

    address_pairs is a list of AllowedAddressPair-like objects. Returns
    the port id on success, None on failure or when address_pairs is empty.
    """
    if not address_pairs:
        return
    pairs = [{'ip_address': pair.ipaddress,
              'mac_address': pair.macaddress}
             for pair in address_pairs]
    json_body = {'port': {
        "allowed_address_pairs": pairs
    }}

    try:
        port = neutron_client.update_port(port=port_id,
                                          body=json_body)
        return port['port']['id']
    except Exception as e:
        logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
                     " %s" % (port_id, address_pairs, e))
        return None
154
155
def create_instance(nova_client,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node='',
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
    """Boot an instance, wait for ACTIVE, then attach security group sg_id.

    kwargs may carry 'flavor'; otherwise the suite default flavor is used.
    Raises Exception when the instance fails to boot. Returns the nova
    instance object.
    """
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        raise Exception("Error while booting instance {}".format(name))
    else:
        # next(iter(...)) works on both Python 2 and 3;
        # dict.itervalues().next() (used previously) is Python 2 only.
        logger.debug("Instance '%s' booted successfully. IP='%s'." %
                     (name, next(iter(instance.networks.values()))[0]))

    if secgroup_name:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, secgroup_name))
    else:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, sg_id))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)

    return instance
207
208
def generate_ping_userdata(ips_array, ping_count=10):
    """Return a cloud-init shell script that forever pings every IP in
    ips_array, `ping_count` probes per round, logging OK/KO per target."""
    ips = "".join(" %s" % ip for ip in ips_array)
    return ("#!/bin/sh\n"
            "set%s\n"
            "while true; do\n"
            " for i do\n"
            "  ip=$i\n"
            "  ping -c %s $ip 2>&1 >/dev/null\n"
            "  RES=$?\n"
            "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            "   echo ping $ip OK\n"
            "  else echo ping $ip KO\n"
            "  fi\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % (ips, ping_count))
230
231
def generate_userdata_common():
    """Return a cloud-init script that installs the shared cirros ssh
    key pair and fixes ownership/permissions under /home/cirros/.ssh."""
    pub_key = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
               "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
               "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
               "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
               "cirros@test1")
    script_lines = [
        "#!/bin/sh",
        "sudo mkdir -p /home/cirros/.ssh/",
        "sudo chown cirros:cirros /home/cirros/.ssh/",
        "sudo chown cirros:cirros /home/cirros/id_rsa",
        "mv /home/cirros/id_rsa /home/cirros/.ssh/",
        "sudo echo %s>/home/cirros/.ssh/authorized_keys" % pub_key,
        "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys",
        "chmod 700 /home/cirros/.ssh",
        "chmod 644 /home/cirros/.ssh/authorized_keys",
        "chmod 600 /home/cirros/.ssh/id_rsa",
    ]
    return "\n".join(script_lines) + "\n"
248
249
def generate_userdata_with_ssh(ips_array):
    """Return userdata that installs the shared ssh key and then forever
    ssh-polls each IP in ips_array, printing its hostname or a failure."""
    key_setup = generate_userdata_common()
    ips = "".join(" %s" % ip for ip in ips_array)
    poll_loop = ("#!/bin/sh\n"
                 "set%s\n"
                 "while true; do\n"
                 " for i do\n"
                 "  ip=$i\n"
                 "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
                 "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
                 "  RES=$?\n"
                 "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
                 "  else echo $ip 'not reachable';fi;\n"
                 " done\n"
                 " sleep 1\n"
                 "done\n"
                 % ips)
    return key_setup + poll_loop
273
274
def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_Address, net_mask):
    """Return userdata that creates user 'sdnvpn' and brings up the
    aliased interface <interface_name>:<interface_number>."""
    ifconfig = "sudo ifconfig %s:%s %s netmask %s up" % (
        interface_name, interface_number, ip_Address, net_mask)
    return ("#!/bin/sh\n"
            "set -xe\n"
            "sudo useradd -m sdnvpn\n"
            "sudo adduser sdnvpn sudo\n"
            "sudo echo sdnvpn:opnfv | chpasswd\n"
            "sleep 20\n"
            "%s\n" % ifconfig)
286
287
def get_installerHandler():
    """Return a deployment handler for the current installer.

    Supports 'fuel' and 'apex' (read from INSTALLER_TYPE); returns None
    with a warning for anything else.
    """
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type == "apex":
        return DeploymentFactory.get_handler(
            installer_type,
            installer_ip,
            'root',
            pkey_file="/root/.ssh/id_rsa")
    if installer_type == "fuel":
        return DeploymentFactory.get_handler(
            installer_type,
            installer_ip,
            'root',
            'r00tme')

    logger.warn("installer type %s is neither fuel nor apex."
                "returning None for installer handler" % installer_type)
    return None
311
312
def get_nodes():
    """Return the nodes reported by the installer's deployment handler."""
    return get_installerHandler().get_nodes()
316
317
def get_installer_ip():
    """Return the installer IP taken from the INSTALLER_IP env variable."""
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
320
321
def get_instance_ip(instance):
    """Return the first IP address of the instance's first network.

    Uses next(iter(...)), which works on both Python 2 and Python 3;
    the previous dict.itervalues().next() exists only on Python 2.
    """
    return next(iter(instance.networks.values()))[0]
325
326
def wait_for_instance(instance, pattern=".* login:", tries=40):
    """Poll the instance console until `pattern` appears.

    Fetches the console log up to `tries` times, 2 seconds apart.
    Returns True when the pattern was seen, False otherwise.
    """
    logger.info("Waiting for instance %s to boot up" % instance.id)
    poll_interval = 2
    expected_regex = re.compile(pattern)
    console_log = ""
    for _ in range(tries):
        if expected_regex.search(console_log):
            break
        console_log = instance.get_console_output()
        time.sleep(poll_interval)

    if expected_regex.search(console_log):
        return True
    logger.error("Instance %s does not boot up properly."
                 % instance.id)
    return False
342
343
def wait_for_instances_up(*instances):
    """Return True once every given instance shows a login prompt.

    Every instance is polled (no short-circuit), matching the original
    list-then-all evaluation order.
    """
    statuses = [wait_for_instance(vm) for vm in instances]
    return all(statuses)
347
348
def wait_for_instances_get_dhcp(*instances):
    """Return True once every instance's console reports a DHCP lease."""
    statuses = [wait_for_instance(vm, "Lease of .* obtained")
                for vm in instances]
    return all(statuses)
353
354
def async_Wait_for_instances(instances, tries=40):
    """Wait concurrently for all instances to show a login prompt.

    Uses the module-level executor; logs an error when any instance
    fails to boot within the allotted tries.
    """
    if not instances:
        return
    futures = [executor.submit(wait_for_instance, vm, ".* login:", tries)
               for vm in instances]
    results = [future.result() for future in futures]
    if not all(results):
        logger.error("one or more instances is not yet booted up")
370
371
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    """Poll until net_id appears among the BGPVPN's associated networks.

    Polls once per second for up to 30 tries. Returns True on success,
    False on timeout.
    """
    tries = 30
    sleep_time = 1
    nets = []
    # Fix: the message placeholders previously received (bgpvpn_id, net_id)
    # in the wrong order.
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True
388
389
def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    """Return True if every network id in args associates with the BGPVPN."""
    # 'net_id' instead of 'id' avoids shadowing the builtin.
    statuses = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id)
                for net_id in args]
    return all(statuses)
395
396
def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    """Poll until router_id appears among the BGPVPN's associated routers.

    Polls once per second for up to 30 tries. Returns True on success,
    False on timeout.
    """
    tries = 30
    sleep_time = 1
    routers = []
    # Fix: the message placeholders previously received
    # (bgpvpn_id, router_id) in the wrong order.
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True
412
413
def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    """Return True if every router id in args associates with the BGPVPN."""
    # 'router_id' instead of 'id' avoids shadowing the builtin.
    statuses = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id,
                                          router_id)
                for router_id in args]
    return all(statuses)
419
420
def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
        TODO: Replace delay with polling logic. '''
    # Fixed 30s settle time between subtests; any args/kwargs are
    # accepted and ignored so this can be dropped in as a generic hook.
    time.sleep(30)
425
426
def assert_and_get_compute_nodes(nova_client, required_node_number=2):
    """Get the compute nodes in the deployment.

    Raises Exception when the deployment has fewer than
    `required_node_number` compute nodes. (Previously the threshold was
    hard-coded to 2 and the parameter was silently ignored.)
    """
    compute_nodes = os_utils.get_hypervisors(nova_client)

    num_compute_nodes = len(compute_nodes)
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        raise Exception("There are {} compute nodes in the deployment. "
                        "Minimum number of nodes to complete the test"
                        " is {}.".format(num_compute_nodes,
                                         required_node_number))

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes
443
444
def open_icmp(neutron_client, security_group_id):
    """Add an ingress ICMP rule to the security group unless present."""
    # check_security_group_rules() is truthy when the rule still needs
    # to be created (the else branch logs that the rule already exists).
    needs_rule = os_utils.check_security_group_rules(neutron_client,
                                                     security_group_id,
                                                     'ingress',
                                                     'icmp')
    if not needs_rule:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
        return
    created = os_utils.create_secgroup_rule(neutron_client,
                                            security_group_id,
                                            'ingress',
                                            'icmp')
    if not created:
        logger.error("Failed to create icmp security group rule...")
459
460
def open_http_port(neutron_client, security_group_id):
    """Add an ingress TCP/80 rule to the security group unless present."""
    needs_rule = os_utils.check_security_group_rules(neutron_client,
                                                     security_group_id,
                                                     'ingress',
                                                     'tcp',
                                                     80, 80)
    if not needs_rule:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
        return
    created = os_utils.create_secgroup_rule(neutron_client,
                                            security_group_id,
                                            'ingress',
                                            'tcp',
                                            80, 80)
    if not created:
        logger.error("Failed to create http security group rule...")
478
479
def open_bgp_port(neutron_client, security_group_id):
    """Add an ingress TCP/179 (BGP) rule to the group unless present."""
    needs_rule = os_utils.check_security_group_rules(neutron_client,
                                                     security_group_id,
                                                     'ingress',
                                                     'tcp',
                                                     179, 179)
    if not needs_rule:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
        return
    created = os_utils.create_secgroup_rule(neutron_client,
                                            security_group_id,
                                            'ingress',
                                            'tcp',
                                            179, 179)
    if not created:
        logger.error("Failed to create bgp security group rule...")
496
497
def exec_cmd(cmd, verbose):
    """Run `cmd` through a shell and return (output, success).

    output is the combined stdout+stderr text; success is False when
    the command exits non-zero.
    """
    success = True
    logger.debug("Executing '%s'" % cmd)
    # universal_newlines=True makes stdout text on both Python 2 and 3;
    # the previous str-accumulation of bytes lines breaks on Python 3
    # (and was quadratic).
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         universal_newlines=True)
    output = p.stdout.read()

    if verbose:
        logger.debug(output)

    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success
517
518
def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL FIB for `ip`.

    Fetches the FIB dump from the controller's RESTCONF API and returns
    True when `ip` appears in it, False on a non-200 response.
    """
    url = "http://" + controller_ip + \
          ":8181/restconf/config/odl-fib:fibEntries/"
    # Fix: "Querring" typo in the original log message.
    logger.debug("Querying '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        return False
    # Fix: log the ip actually being searched for (the original logged
    # controller_ip here, which is not what the return value tests).
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text
532
533
def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell.

    This is a bit flimsy because of shell quote escaping; make sure that
    the cmd passed does not have any top level double quotes or this
    function will break.
    The /dev/null is used because client works, but outputs something
    that contains "ERROR" and run_cmd doesn't like that.
    '''
    karaf_client = '/opt/opendaylight/bin/client -h 127.0.0.1'
    karaf_cmd = '%s "%s" 2>/dev/null' % (karaf_client, cmd)
    return odl_node.run_cmd(karaf_cmd)
545
546
def wait_for_cloud_init(instance):
    """Wait for cloud-init on `instance` to finish (up to ~10 minutes).

    Polls the console log every 30s. Returns True when cloud-init
    reports completion, False on a module failure or timeout.
    """
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
    while tries > 0:
        instance_log = instance.get_console_output()
        if "Failed to run module" in instance_log:
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            success = True
            break
        time.sleep(sleep_time)
        tries = tries - 1

    if tries == 0:
        # The loop exhausted every try without seeing either marker:
        # treat it as a timeout (instance_log holds the last console dump).
        logger.error("Cloud init timed out"
                     ". Reason: %s",
                     instance_log)
        success = False
    logger.info("Finished waiting for cloud init of instance {} result was {}"
                "".format(instance.name, success))
    return success
575
576
def attach_instance_to_ext_br(instance, compute_node):
    """Attach the instance's libvirt domain to the external bridge.

    On Fuel this is br-ex directly; on Apex a linux bridge (br-quagga)
    is created and patched into br-ex with a veth pair first, because
    virsh attach-interface cannot attach to an OVS bridge.
    """
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    # NOTE(review): if INSTALLER_TYPE is neither fuel nor apex, `bridge`
    # is never assigned and the run_cmd below raises NameError — confirm
    # callers only run on these two installers.
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        set -e
        if ! sudo brctl show |grep -q ^{bridge};then
          sudo brctl addbr {bridge}
          sudo ip link set {bridge} up
          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
          sudo ip link set dev ovs-quagga-tap up
          sudo ip link set dev quagga-tap up
          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
          sudo brctl addif {bridge} quagga-tap
        fi
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))
604
605
def detach_instance_from_ext_br(instance, compute_node):
    """Detach the instance from the external bridge and, on Apex, tear
    down the veth/linux-bridge plumbing created by
    attach_instance_to_ext_br().
    """
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    # NOTE(review): this shell loop collects 52:54:* MACs from ALL running
    # domains on the node, not just this instance — presumably only one
    # qemu MAC matches in practice; confirm that assumption holds.
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               "grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        # Nothing further to clean up on Fuel: the domain was attached
        # straight to br-ex.
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
            sudo brctl delif {bridge} quagga-tap &&
            sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
            sudo ip link set dev quagga-tap down &&
            sudo ip link set dev ovs-quagga-tap down &&
            sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
            sudo ip link set {bridge} down &&
            sudo brctl delbr {bridge}
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))
635
636
def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
                    subnet_ids, router_ids, network_ids):
    """Tear down neutron resources in dependency order.

    Returns False as soon as deleting a floating ip, subnet, router or
    network fails; failures while removing router interfaces/gateways
    are only logged. Returns True when everything was deleted.
    """
    if len(floatingip_ids) != 0:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
                logger.error('Fail to delete all floating ips. '
                             'Floating ip with id {} was not deleted.'.
                             format(floatingip_id))
                return False

    if len(bgpvpn_ids) != 0:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)

    if len(interfaces) != 0:
        # interfaces is a list of (router_id, subnet_id) pairs.
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(neutron_client,
                                                    router_id, subnet_id):
                logger.error('Fail to delete all interface routers. '
                             'Interface router with id {} was not deleted.'.
                             format(router_id))

    # First pass over routers: clear their external gateways so the
    # routers themselves can be deleted after their subnets below.
    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(neutron_client, router_id):
                logger.error('Fail to delete all gateway routers. '
                             'Gateway router with id {} was not deleted.'.
                             format(router_id))

    if len(subnet_ids) != 0:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
                logger.error('Fail to delete all subnets. '
                             'Subnet with id {} was not deleted.'.
                             format(subnet_id))
                return False

    # Second pass over routers: delete them now that interfaces, gateways
    # and subnets are gone.
    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(neutron_client, router_id):
                logger.error('Fail to delete all routers. '
                             'Router with id {} was not deleted.'.
                             format(router_id))
                return False

    if len(network_ids) != 0:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(neutron_client, network_id):
                logger.error('Fail to delete all networks. '
                             'Network with id {} was not deleted.'.
                             format(network_id))
                return False
    return True
691
692
def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
    """Delete the given flavors and instances.

    Returns False as soon as an instance fails to delete, True otherwise.
    """
    for flavor_id in flavor_ids or []:
        nova_client.flavors.delete(flavor_id)
    for instance_id in instance_ids:
        if not os_utils.delete_instance(nova_client, instance_id):
            logger.error('Fail to delete all instances. '
                         'Instance with id {} was not deleted.'.
                         format(instance_id))
            return False
    return True
705
706
def cleanup_glance(glance_client, image_ids):
    """Delete the given glance images.

    Returns False as soon as an image fails to delete, True otherwise.
    """
    for image_id in image_ids:
        if not os_utils.delete_glance_image(glance_client, image_id):
            logger.error('Fail to delete all images. '
                         'Image with id {} was not deleted.'.
                         format(image_id))
            return False
    return True
716
717
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN built from the given keyword arguments.

    Typical kwargs include route_distinguishers and route_targets.
    """
    payload = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(payload)
723
724
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update the BGPVPN `bgpvpn_id` with the given keyword arguments."""
    payload = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, payload)
728
729
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN with the given id."""
    result = neutron_client.delete_bgpvpn(bgpvpn_id)
    return result
732
733
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Return the BGPVPN resource with the given id."""
    bgpvpn = neutron_client.show_bgpvpn(bgpvpn_id)
    return bgpvpn
736
737
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the list of router ids associated with the BGPVPN."""
    bgpvpn = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn['bgpvpn']['routers']
740
741
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the list of network ids associated with the BGPVPN."""
    bgpvpn = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn['bgpvpn']['networks']
744
745
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate router_id with the BGPVPN identified by bgpvpn_id."""
    payload = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, payload)
749
750
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate neutron_network_id with the BGPVPN bgpvpn_id."""
    payload = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, payload)
754
755
def is_fail_mode_secure():
    """
    Checks the value of the attribute fail_mode,
    if it is set to secure. This check is performed
    on all OVS br-int interfaces, for all OpenStack nodes.

    Returns a dict mapping node name -> True/False; only active nodes
    that actually expose a br-int bridge get an entry.
    """
    is_secure = {}
    openstack_nodes = get_nodes()
    # Lists all bridge names found in 'ovs-vsctl show' output.
    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
                       "grep -i bridge | "
                       "awk '{print $2}'")
    # Define OVS get fail_mode command
    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in openstack_nodes:
        if not openstack_node.is_active():
            continue

        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
                        strip().split('\n'))
        if 'br-int' in ovs_int_list:
            # Execute get fail_mode command
            br_int_fail_mode = (openstack_node.
                                run_cmd(get_ovs_fail_mode_cmd).strip())
            if br_int_fail_mode == 'secure':
                # success
                is_secure[openstack_node.name] = True
            else:
                # failure
                logger.error('The fail_mode for br-int was not secure '
                             'in {} node'.format(openstack_node.name))
                is_secure[openstack_node.name] = False
    return is_secure
788
789
def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    """Update a tenant's network/subnet/port/router quotas.

    Returns True on success, False (with an error logged) on failure.
    """
    quota = {"network": nw_quota,
             "subnet": subnet_quota,
             "port": port_quota,
             "router": router_quota}
    try:
        neutron_client.update_quota(tenant_id=tenant_id,
                                    body={"quota": quota})
        return True
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
                     " '%s', '%s', '%s', '%s, %s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
        return False
809
810
def update_instance_quota_class(nova_client, instances_quota):
    """Update the default nova quota class with a new instance limit.

    :param nova_client: client exposing quota_classes.update()
    :param instances_quota: new maximum number of instances
    :return: True if the update succeeded, False otherwise
    """
    try:
        nova_client.quota_classes.update("default",
                                         instances=instances_quota)
    except Exception as e:
        # Best-effort: report the failure instead of propagating it.
        logger.error("Error [update_instance_quota_class(nova_client,"
                     " '%s' )]: %s" % (instances_quota, e))
        return False
    return True
819
820
def get_neutron_quota(neutron_client, tenant_id):
    """Fetch the neutron quota dictionary of a tenant.

    :param neutron_client: client exposing show_quota()
    :param tenant_id: tenant whose quotas are fetched
    :return: the 'quota' dict from the neutron response
    :raises Exception: re-raised from the client after logging
    """
    try:
        response = neutron_client.show_quota(tenant_id=tenant_id)
    except Exception as e:
        logger.error("Error in getting neutron quota for tenant "
                     " '%s' )]: %s" % (tenant_id, e))
        raise
    return response['quota']
828
829
def get_nova_instances_quota(nova_client):
    """Return the instance limit of the default nova quota class.

    :param nova_client: client exposing quota_classes.get()
    :return: the 'instances' attribute of the default quota class
    :raises Exception: re-raised from the client after logging
    """
    try:
        default_class = nova_client.quota_classes.get("default")
    except Exception as e:
        logger.error("Error in getting nova instances quota: %s" % e)
        raise
    return default_class.instances
836
837
def update_router_extra_route(neutron_client, router_id, extra_routes):
    """Replace the extra routes of a router.

    :param neutron_client: client exposing update_router()
    :param router_id: identifier of the router to update
    :param extra_routes: iterable of objects with 'destination' and
                         'nexthop' attributes (e.g. ExtraRoute); when
                         empty the router is left untouched
    :return: True on success; None when extra_routes is empty
             (kept for backward compatibility)
    :raises Exception: re-raised from the client after logging
    """
    # Idiomatic emptiness test instead of len(...) <= 0.
    if not extra_routes:
        return
    routes_list = [{'destination': route.destination,
                    'nexthop': route.nexthop}
                   for route in extra_routes]
    json_body = {'router': {
        "routes": routes_list
    }}

    try:
        neutron_client.update_router(router_id, body=json_body)
        return True
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
        raise
856
857
def update_router_no_extra_route(neutron_client, router_ids):
    """Clear the extra routes of every router in router_ids.

    Bug fix: the original returned True from inside the loop after the
    first successful update, so the extra routes of all remaining
    routers were never cleared.

    :param neutron_client: client exposing update_router()
    :param router_ids: iterable of router identifiers to clear
    :return: True if every router was updated successfully,
             False if any update failed (failures are logged, not raised)
    """
    json_body = {'router': {
        "routes": [
        ]}}

    all_cleared = True
    for router_id in router_ids:
        try:
            neutron_client.update_router(router_id, body=json_body)
        except Exception as e:
            # Best-effort cleanup: log and continue with the next router.
            logger.error("Error in clearing extra route: %s" % e)
            all_cleared = False
    return all_cleared
869
870
def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS groups from all bridges and nodes in lists.

    :param compute_node_list: nodes exposing run_cmd()
    :param ovs_br_list: OVS bridge names to inspect
    :param of_protocol: OpenFlow protocol version passed to ovs-ofctl
    :return: list of output lines containing 'group'
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        # Hoisted out of the inner loop: the bridge listing is invariant
        # per node, so run "ovs-vsctl show" once instead of once per bridge.
        node_bridges = compute_node.run_cmd("sudo ovs-vsctl show")
        for ovs_br in ovs_br_list:
            if ovs_br in node_bridges:
                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                                  "grep group".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
                                  split("\n"))
    return cmd_out_lines
886
887
def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS flows from all bridges and nodes in lists.

    :param compute_node_list: nodes exposing run_cmd()
    :param ovs_br_list: OVS bridge names to inspect
    :param of_protocol: OpenFlow protocol version passed to ovs-ofctl
    :return: list of output lines containing 'table='
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        # Hoisted out of the inner loop: the bridge listing is invariant
        # per node, so run "ovs-vsctl show" once instead of once per bridge.
        node_bridges = compute_node.run_cmd("sudo ovs-vsctl show")
        for ovs_br in ovs_br_list:
            if ovs_br in node_bridges:
                ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
                                 "grep table=".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
                                  split("\n"))
    return cmd_out_lines