Merge "Fix instance boot when metadata exists"
[sdnvpn.git] / sdnvpn / lib / utils.py
1 #!/usr/bin/python
2 #
3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 import logging
11 import os
12 import sys
13 import time
14 import requests
15 import re
16 import subprocess
17
18 import functest.utils.openstack_utils as os_utils
19 from opnfv.deployment.factory import Factory as DeploymentFactory
20
21 from sdnvpn.lib import config as sdnvpn_config
22
23 logger = logging.getLogger('sdnvpn_test_utils')
24
25 common_config = sdnvpn_config.CommonConfig()
26
27 ODL_USER = 'admin'
28 ODL_PASS = 'admin'
29
30
31 def create_custom_flavor():
32     return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
33                                          common_config.custom_flavor_ram,
34                                          common_config.custom_flavor_disk,
35                                          common_config.custom_flavor_vcpus)
36
37
38 def create_net(neutron_client, name):
39     logger.debug("Creating network %s", name)
40     net_id = os_utils.create_neutron_net(neutron_client, name)
41     if not net_id:
42         logger.error(
43             "There has been a problem when creating the neutron network")
44         sys.exit(-1)
45     return net_id
46
47
48 def create_subnet(neutron_client, name, cidr, net_id):
49     logger.debug("Creating subnet %s in network %s with cidr %s",
50                  name, net_id, cidr)
51     subnet_id = os_utils.create_neutron_subnet(neutron_client,
52                                                name,
53                                                cidr,
54                                                net_id)
55     if not subnet_id:
56         logger.error(
57             "There has been a problem when creating the neutron subnet")
58         sys.exit(-1)
59     return subnet_id
60
61
62 def create_network(neutron_client, net, subnet1, cidr1,
63                    router, subnet2=None, cidr2=None):
64     """Network assoc won't work for networks/subnets created by this function.
65     It is an ODL limitation due to it handling routers as vpns.
66     See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
67     network_dic = os_utils.create_network_full(neutron_client,
68                                                net,
69                                                subnet1,
70                                                router,
71                                                cidr1)
72     if not network_dic:
73         logger.error(
74             "There has been a problem when creating the neutron network")
75         sys.exit(-1)
76     net_id = network_dic["net_id"]
77     subnet_id = network_dic["subnet_id"]
78     router_id = network_dic["router_id"]
79
80     if subnet2 is not None:
81         logger.debug("Creating and attaching a second subnet...")
82         subnet_id = os_utils.create_neutron_subnet(
83             neutron_client, subnet2, cidr2, net_id)
84         if not subnet_id:
85             logger.error(
86                 "There has been a problem when creating the second subnet")
87             sys.exit(-1)
88         logger.debug("Subnet '%s' created successfully" % subnet_id)
89     return net_id, subnet_id, router_id
90
91
92 def create_instance(nova_client,
93                     name,
94                     image_id,
95                     network_id,
96                     sg_id,
97                     secgroup_name=None,
98                     fixed_ip=None,
99                     compute_node='',
100                     userdata=None,
101                     files=None,
102                     **kwargs
103                     ):
104     if 'flavor' not in kwargs:
105         kwargs['flavor'] = common_config.default_flavor
106
107     logger.info("Creating instance '%s'..." % name)
108     logger.debug(
109         "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
110         " network=%s\n secgroup=%s \n hypervisor=%s \n"
111         " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
112         % (name, kwargs['flavor'], image_id, network_id, sg_id,
113            compute_node, fixed_ip, files, userdata))
114     instance = os_utils.create_instance_and_wait_for_active(
115         kwargs['flavor'],
116         image_id,
117         network_id,
118         name,
119         config_drive=True,
120         userdata=userdata,
121         av_zone=compute_node,
122         fixed_ip=fixed_ip,
123         files=files)
124
125     if instance is None:
126         logger.error("Error while booting instance.")
127         sys.exit(-1)
128     else:
129         logger.debug("Instance '%s' booted successfully. IP='%s'." %
130                      (name, instance.networks.itervalues().next()[0]))
131     # Retrieve IP of INSTANCE
132     # instance_ip = instance.networks.get(network_id)[0]
133
134     if secgroup_name:
135         logger.debug("Adding '%s' to security group '%s'..."
136                      % (name, secgroup_name))
137     else:
138         logger.debug("Adding '%s' to security group '%s'..."
139                      % (name, sg_id))
140     os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
141
142     return instance
143
144
145 def generate_ping_userdata(ips_array, ping_count=10):
146     ips = ""
147     for ip in ips_array:
148         ips = ("%s %s" % (ips, ip))
149
150     ips = ips.replace('  ', ' ')
151     return ("#!/bin/sh\n"
152             "set%s\n"
153             "while true; do\n"
154             " for i do\n"
155             "  ip=$i\n"
156             "  ping -c %s $ip 2>&1 >/dev/null\n"
157             "  RES=$?\n"
158             "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
159             "   echo ping $ip OK\n"
160             "  else echo ping $ip KO\n"
161             "  fi\n"
162             " done\n"
163             " sleep 1\n"
164             "done\n"
165             % (ips, ping_count))
166
167
168 def generate_userdata_common():
169     return ("#!/bin/sh\n"
170             "sudo mkdir -p /home/cirros/.ssh/\n"
171             "sudo chown cirros:cirros /home/cirros/.ssh/\n"
172             "sudo chown cirros:cirros /home/cirros/id_rsa\n"
173             "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
174             "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
175             "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
176             "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
177             "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
178             "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
179             "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
180             "chmod 700 /home/cirros/.ssh\n"
181             "chmod 644 /home/cirros/.ssh/authorized_keys\n"
182             "chmod 600 /home/cirros/.ssh/id_rsa\n"
183             )
184
185
186 def generate_userdata_with_ssh(ips_array):
187     u1 = generate_userdata_common()
188
189     ips = ""
190     for ip in ips_array:
191         ips = ("%s %s" % (ips, ip))
192
193     ips = ips.replace('  ', ' ')
194     u2 = ("#!/bin/sh\n"
195           "set%s\n"
196           "while true; do\n"
197           " for i do\n"
198           "  ip=$i\n"
199           "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
200           "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
201           "  RES=$?\n"
202           "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
203           "  else echo $ip 'not reachable';fi;\n"
204           " done\n"
205           " sleep 1\n"
206           "done\n"
207           % ips)
208     return (u1 + u2)
209
210
211 def get_installerHandler():
212     installer_type = str(os.environ['INSTALLER_TYPE'].lower())
213     installer_ip = get_installer_ip()
214
215     if installer_type not in ["fuel", "apex"]:
216         logger.warn("installer type %s is neither fuel nor apex."
217                     "returning None for installer handler" % installer_type)
218         return None
219     else:
220         if installer_type in ["apex"]:
221             developHandler = DeploymentFactory.get_handler(
222                 installer_type,
223                 installer_ip,
224                 'root',
225                 pkey_file="/root/.ssh/id_rsa")
226
227         if installer_type in ["fuel"]:
228             developHandler = DeploymentFactory.get_handler(
229                 installer_type,
230                 installer_ip,
231                 'root',
232                 'r00tme')
233         return developHandler
234
235
236 def get_nodes():
237     developHandler = get_installerHandler()
238     return developHandler.get_nodes()
239
240
241 def get_installer_ip():
242     return str(os.environ['INSTALLER_IP'])
243
244
245 def get_instance_ip(instance):
246     instance_ip = instance.networks.itervalues().next()[0]
247     return instance_ip
248
249
250 def wait_for_instance(instance, pattern=".* login:"):
251     logger.info("Waiting for instance %s to boot up" % instance.id)
252     tries = 40
253     sleep_time = 2
254     expected_regex = re.compile(pattern)
255     console_log = ""
256     while tries > 0 and not expected_regex.search(console_log):
257         console_log = instance.get_console_output()
258         time.sleep(sleep_time)
259         tries -= 1
260
261     if not expected_regex.search(console_log):
262         logger.error("Instance %s does not boot up properly."
263                      % instance.id)
264         return False
265     return True
266
267
268 def wait_for_instances_up(*instances):
269     check = [wait_for_instance(instance) for instance in instances]
270     return all(check)
271
272
273 def wait_for_instances_get_dhcp(*instances):
274     check = [wait_for_instance(instance, "Lease of .* obtained")
275              for instance in instances]
276     return all(check)
277
278
279 def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
280     tries = 30
281     sleep_time = 1
282     nets = []
283     logger.debug("Waiting for network %s to associate with BGPVPN %s "
284                  % (bgpvpn_id, net_id))
285
286     while tries > 0 and net_id not in nets:
287         nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
288         time.sleep(sleep_time)
289         tries -= 1
290     if net_id not in nets:
291         logger.error("Association of network %s with BGPVPN %s failed" %
292                      (net_id, bgpvpn_id))
293         return False
294     return True
295
296
297 def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
298     check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
299              for id in args]
300     # Return True if all associations succeeded
301     return all(check)
302
303
304 def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
305     tries = 30
306     sleep_time = 1
307     routers = []
308     logger.debug("Waiting for router %s to associate with BGPVPN %s "
309                  % (bgpvpn_id, router_id))
310     while tries > 0 and router_id not in routers:
311         routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
312         time.sleep(sleep_time)
313         tries -= 1
314     if router_id not in routers:
315         logger.error("Association of router %s with BGPVPN %s failed" %
316                      (router_id, bgpvpn_id))
317         return False
318     return True
319
320
321 def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
322     check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
323              for id in args]
324     # Return True if all associations succeeded
325     return all(check)
326
327
328 def wait_before_subtest(*args, **kwargs):
329     ''' This is a placeholder.
330         TODO: Replace delay with polling logic. '''
331     time.sleep(30)
332
333
334 def assert_and_get_compute_nodes(nova_client, required_node_number=2):
335     """Get the compute nodes in the deployment
336     Exit if the deployment doesn't have enough compute nodes"""
337     compute_nodes = os_utils.get_hypervisors(nova_client)
338
339     num_compute_nodes = len(compute_nodes)
340     if num_compute_nodes < 2:
341         logger.error("There are %s compute nodes in the deployment. "
342                      "Minimum number of nodes to complete the test is 2."
343                      % num_compute_nodes)
344         sys.exit(-1)
345
346     logger.debug("Compute nodes: %s" % compute_nodes)
347     return compute_nodes
348
349
350 def open_icmp(neutron_client, security_group_id):
351     if os_utils.check_security_group_rules(neutron_client,
352                                            security_group_id,
353                                            'ingress',
354                                            'icmp'):
355
356         if not os_utils.create_secgroup_rule(neutron_client,
357                                              security_group_id,
358                                              'ingress',
359                                              'icmp'):
360             logger.error("Failed to create icmp security group rule...")
361     else:
362         logger.info("This rule exists for security group: %s"
363                     % security_group_id)
364
365
366 def open_http_port(neutron_client, security_group_id):
367     if os_utils.check_security_group_rules(neutron_client,
368                                            security_group_id,
369                                            'ingress',
370                                            'tcp',
371                                            80, 80):
372
373         if not os_utils.create_secgroup_rule(neutron_client,
374                                              security_group_id,
375                                              'ingress',
376                                              'tcp',
377                                              80, 80):
378
379             logger.error("Failed to create http security group rule...")
380     else:
381         logger.info("This rule exists for security group: %s"
382                     % security_group_id)
383
384
385 def open_bgp_port(neutron_client, security_group_id):
386     if os_utils.check_security_group_rules(neutron_client,
387                                            security_group_id,
388                                            'ingress',
389                                            'tcp',
390                                            179, 179):
391
392         if not os_utils.create_secgroup_rule(neutron_client,
393                                              security_group_id,
394                                              'ingress',
395                                              'tcp',
396                                              179, 179):
397             logger.error("Failed to create bgp security group rule...")
398     else:
399         logger.info("This rule exists for security group: %s"
400                     % security_group_id)
401
402
403 def exec_cmd(cmd, verbose):
404     success = True
405     logger.debug("Executing '%s'" % cmd)
406     p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
407                          stderr=subprocess.STDOUT)
408     output = ""
409     for line in iter(p.stdout.readline, b''):
410         output += line
411
412     if verbose:
413         logger.debug(output)
414
415     p.stdout.close()
416     returncode = p.wait()
417     if returncode != 0:
418         logger.error("Command %s failed to execute." % cmd)
419         success = False
420
421     return output, success
422
423
424 def check_odl_fib(ip, controller_ip):
425     """Check that there is an entry in the ODL Fib for `ip`"""
426     url = "http://" + controller_ip + \
427           ":8181/restconf/config/odl-fib:fibEntries/"
428     logger.debug("Querring '%s' for FIB entries", url)
429     res = requests.get(url, auth=(ODL_USER, ODL_PASS))
430     if res.status_code != 200:
431         logger.error("OpenDaylight response status code: %s", res.status_code)
432         return False
433     logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
434                  % controller_ip)
435     logger.debug("OpenDaylight FIB: \n%s" % res.text)
436     return ip in res.text
437
438
439 def run_odl_cmd(odl_node, cmd):
440     '''Run a command in the OpenDaylight Karaf shell
441     This is a bit flimsy because of shell quote escaping, make sure that
442     the cmd passed does not have any top level double quotes or this
443     function will break.
444     The /dev/null is used because client works, but outputs something
445     that contains "ERROR" and run_cmd doesn't like that.
446     '''
447     karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
448                  ' 2>/dev/null' % cmd)
449     return odl_node.run_cmd(karaf_cmd)
450
451
452 def wait_for_cloud_init(instance):
453     success = True
454     # ubuntu images take a long time to start
455     tries = 20
456     sleep_time = 30
457     logger.info("Waiting for cloud init of instance: {}"
458                 "".format(instance.name))
459     while tries > 0:
460         instance_log = instance.get_console_output()
461         if "Failed to run module" in instance_log:
462             success = False
463             logger.error("Cloud init failed to run. Reason: %s",
464                          instance_log)
465             break
466         if re.search(r"Cloud-init v. .+ finished at", instance_log):
467             success = True
468             break
469         time.sleep(sleep_time)
470         tries = tries - 1
471
472     if tries == 0:
473         logger.error("Cloud init timed out"
474                      ". Reason: %s",
475                      instance_log)
476         success = False
477     logger.info("Finished waiting for cloud init of instance {} result was {}"
478                 "".format(instance.name, success))
479     return success
480
481
482 def attach_instance_to_ext_br(instance, compute_node):
483     libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
484     installer_type = str(os.environ['INSTALLER_TYPE'].lower())
485     if installer_type == "fuel":
486         bridge = "br-ex"
487     elif installer_type == "apex":
488         # In Apex, br-ex is an ovs bridge and virsh attach-interface
489         # won't just work. We work around it by creating a linux
490         # bridge, attaching that to br-ex with a veth pair
491         # and virsh-attaching the instance to the linux-bridge
492         bridge = "br-quagga"
493         cmd = """
494         set -e
495         if ! sudo brctl show |grep -q ^{bridge};then
496           sudo brctl addbr {bridge}
497           sudo ip link set {bridge} up
498           sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
499           sudo ip link set dev ovs-quagga-tap up
500           sudo ip link set dev quagga-tap up
501           sudo ovs-vsctl add-port br-ex ovs-quagga-tap
502           sudo brctl addif {bridge} quagga-tap
503         fi
504         """
505         compute_node.run_cmd(cmd.format(bridge=bridge))
506
507     compute_node.run_cmd("sudo virsh attach-interface %s"
508                          " bridge %s" % (libvirt_instance_name, bridge))
509
510
511 def detach_instance_from_ext_br(instance, compute_node):
512     libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
513     mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
514                                "grep running | awk '{print $2}'); "
515                                "do echo -n ; sudo virsh dumpxml $vm| "
516                                "grep -oP '52:54:[\da-f:]+' ;done")
517     compute_node.run_cmd("sudo virsh detach-interface --domain %s"
518                          " --type bridge --mac %s"
519                          % (libvirt_instance_name, mac))
520
521     installer_type = str(os.environ['INSTALLER_TYPE'].lower())
522     if installer_type == "fuel":
523         bridge = "br-ex"
524     elif installer_type == "apex":
525         # In Apex, br-ex is an ovs bridge and virsh attach-interface
526         # won't just work. We work around it by creating a linux
527         # bridge, attaching that to br-ex with a veth pair
528         # and virsh-attaching the instance to the linux-bridge
529         bridge = "br-quagga"
530         cmd = """
531             sudo brctl delif {bridge} quagga-tap &&
532             sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
533             sudo ip link set dev quagga-tap down &&
534             sudo ip link set dev ovs-quagga-tap down &&
535             sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
536             sudo ip link set {bridge} down &&
537             sudo brctl delbr {bridge}
538         """
539         compute_node.run_cmd(cmd.format(bridge=bridge))
540
541
542 def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
543                     subnet_ids, router_ids, network_ids):
544
545     if len(floatingip_ids) != 0:
546         for floatingip_id in floatingip_ids:
547             if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
548                 logging.error('Fail to delete all floating ips. '
549                               'Floating ip with id {} was not deleted.'.
550                               format(floatingip_id))
551                 return False
552
553     if len(bgpvpn_ids) != 0:
554         for bgpvpn_id in bgpvpn_ids:
555             delete_bgpvpn(neutron_client, bgpvpn_id)
556
557     if len(interfaces) != 0:
558         for router_id, subnet_id in interfaces:
559             if not os_utils.remove_interface_router(neutron_client,
560                                                     router_id, subnet_id):
561                 logging.error('Fail to delete all interface routers. '
562                               'Interface router with id {} was not deleted.'.
563                               format(router_id))
564
565     if len(router_ids) != 0:
566         for router_id in router_ids:
567             if not os_utils.remove_gateway_router(neutron_client, router_id):
568                 logging.error('Fail to delete all gateway routers. '
569                               'Gateway router with id {} was not deleted.'.
570                               format(router_id))
571
572     if len(subnet_ids) != 0:
573         for subnet_id in subnet_ids:
574             if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
575                 logging.error('Fail to delete all subnets. '
576                               'Subnet with id {} was not deleted.'.
577                               format(subnet_id))
578                 return False
579
580     if len(router_ids) != 0:
581         for router_id in router_ids:
582             if not os_utils.delete_neutron_router(neutron_client, router_id):
583                 logging.error('Fail to delete all routers. '
584                               'Router with id {} was not deleted.'.
585                               format(router_id))
586                 return False
587
588     if len(network_ids) != 0:
589         for network_id in network_ids:
590             if not os_utils.delete_neutron_net(neutron_client, network_id):
591                 logging.error('Fail to delete all networks. '
592                               'Network with id {} was not deleted.'.
593                               format(network_id))
594                 return False
595     return True
596
597
598 def cleanup_nova(nova_client, instance_ids):
599     if len(instance_ids) != 0:
600         for instance_id in instance_ids:
601             if not os_utils.delete_instance(nova_client, instance_id):
602                 logging.error('Fail to delete all instances. '
603                               'Instance with id {} was not deleted.'.
604                               format(instance_id))
605                 return False
606     return True
607
608
609 def cleanup_glance(glance_client, image_ids):
610     if len(image_ids) != 0:
611         for image_id in image_ids:
612             if not os_utils.delete_glance_image(glance_client, image_id):
613                 logging.error('Fail to delete all images. '
614                               'Image with id {} was not deleted.'.
615                               format(image_id))
616                 return False
617     return True
618
619
620 def create_bgpvpn(neutron_client, **kwargs):
621     # route_distinguishers
622     # route_targets
623     json_body = {"bgpvpn": kwargs}
624     return neutron_client.create_bgpvpn(json_body)
625
626
627 def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
628     json_body = {"bgpvpn": kwargs}
629     return neutron_client.update_bgpvpn(bgpvpn_id, json_body)
630
631
632 def delete_bgpvpn(neutron_client, bgpvpn_id):
633     return neutron_client.delete_bgpvpn(bgpvpn_id)
634
635
636 def get_bgpvpn(neutron_client, bgpvpn_id):
637     return neutron_client.show_bgpvpn(bgpvpn_id)
638
639
640 def get_bgpvpn_routers(neutron_client, bgpvpn_id):
641     return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']
642
643
644 def get_bgpvpn_networks(neutron_client, bgpvpn_id):
645     return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']
646
647
648 def create_router_association(neutron_client, bgpvpn_id, router_id):
649     json_body = {"router_association": {"router_id": router_id}}
650     return neutron_client.create_router_association(bgpvpn_id, json_body)
651
652
653 def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
654     json_body = {"network_association": {"network_id": neutron_network_id}}
655     return neutron_client.create_network_association(bgpvpn_id, json_body)
656
657
658 def is_fail_mode_secure():
659     """
660     Checks the value of the attribute fail_mode,
661     if it is set to secure. This check is performed
662     on all OVS br-int interfaces, for all OpenStack nodes.
663     """
664     is_secure = {}
665     openstack_nodes = get_nodes()
666     get_ovs_int_cmd = ("sudo ovs-vsctl show | "
667                        "grep -i bridge | "
668                        "awk '{print $2}'")
669     # Define OVS get fail_mode command
670     get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
671     for openstack_node in openstack_nodes:
672         if not openstack_node.is_active():
673             continue
674
675         ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
676                         strip().split('\n'))
677         if 'br-int' in ovs_int_list:
678             # Execute get fail_mode command
679             br_int_fail_mode = (openstack_node.
680                                 run_cmd(get_ovs_fail_mode_cmd).strip())
681             if br_int_fail_mode == 'secure':
682                 # success
683                 is_secure[openstack_node.name] = True
684             else:
685                 # failure
686                 logging.error('The fail_mode for br-int was not secure '
687                               'in {} node'.format(openstack_node.name))
688                 is_secure[openstack_node.name] = False
689     return is_secure
690
691
692 def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
693                                 subnet_quota, port_quota):
694     json_body = {"quota": {
695         "network": nw_quota,
696         "subnet": subnet_quota,
697         "port": port_quota
698     }}
699
700     try:
701         neutron_client.update_quota(tenant_id=tenant_id,
702                                     body=json_body)
703         return True
704     except Exception as e:
705         logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
706                      " '%s', '%s', '%s', '%s')]: %s" %
707                      (tenant_id, nw_quota, subnet_quota, port_quota, e))
708         return False
709
710
711 def update_instance_quota_class(nova_client, instances_quota):
712     try:
713         nova_client.quota_classes.update("default", instances=instances_quota)
714         return True
715     except Exception as e:
716         logger.error("Error [update_instance_quota_class(nova_client,"
717                      " '%s' )]: %s" % (instances_quota, e))
718         return False
719
720
721 def get_neutron_quota(neutron_client, tenant_id):
722     try:
723         return neutron_client.show_quota(tenant_id=tenant_id)['quota']
724     except Exception as e:
725         logger.error("Error in getting neutron quota for tenant "
726                      " '%s' )]: %s" % (tenant_id, e))
727         raise
728
729
730 def get_nova_instances_quota(nova_client):
731     try:
732         return nova_client.quota_classes.get("default").instances
733     except Exception as e:
734         logger.error("Error in getting nova instances quota: %s" % e)
735         raise
736
737
738 def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
739     """
740     Gets, as input, a list of compute nodes and a list of OVS bridges
741     and returns the command console output, as a list of lines, that
742     contains all the OVS groups from all bridges and nodes in lists.
743     """
744     cmd_out_lines = []
745     for compute_node in compute_node_list:
746         for ovs_br in ovs_br_list:
747             if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
748                 ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
749                                   "grep group".format(ovs_br, of_protocol))
750                 cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
751                                   split("\n"))
752     return cmd_out_lines