Add test case 9
[sdnvpn.git] / sdnvpn / lib / utils.py
1 #!/usr/bin/python
2 #
3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 import logging
11 import os
12 import sys
13 import time
14 import requests
15 import re
16 import subprocess
17
18 import functest.utils.openstack_utils as os_utils
19 from opnfv.deployment.factory import Factory as DeploymentFactory
20
21 from sdnvpn.lib import config as sdnvpn_config
22
23 logger = logging.getLogger('sndvpn_test_utils')
24
25 common_config = sdnvpn_config.CommonConfig()
26
27 ODL_USER = 'admin'
28 ODL_PASS = 'admin'
29
30
def create_custom_flavor():
    """Ensure the custom flavor from the common config exists.

    Delegates to os_utils, which returns the existing flavor or creates
    it with the configured ram/disk/vcpus.
    """
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.custom_flavor_name,
                                         cfg.custom_flavor_ram,
                                         cfg.custom_flavor_disk,
                                         cfg.custom_flavor_vcpus)
36
37
def create_net(neutron_client, name):
    """Create a neutron network and return its id.

    Exits the whole process on failure, mirroring the other create_*
    helpers in this module.
    """
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(neutron_client, name)
    if net_id:
        return net_id
    logger.error(
        "There has been a problem when creating the neutron network")
    sys.exit(-1)
46
47
def create_subnet(neutron_client, name, cidr, net_id):
    """Create a neutron subnet inside net_id and return its id.

    Exits the whole process on failure.
    """
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(
        neutron_client, name, cidr, net_id)
    if subnet_id:
        return subnet_id
    logger.error(
        "There has been a problem when creating the neutron subnet")
    sys.exit(-1)
60
61
def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.

    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
    network_dic = os_utils.create_network_full(neutron_client,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        sys.exit(-1)

    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is None:
        return net_id, subnet_id, router_id

    # Optionally attach a second subnet to the same network; its id
    # replaces the first subnet's id in the returned tuple.
    logger.debug("Creating and attaching a second subnet...")
    subnet_id = os_utils.create_neutron_subnet(
        neutron_client, subnet2, cidr2, net_id)
    if not subnet_id:
        logger.error(
            "There has been a problem when creating the second subnet")
        sys.exit(-1)
    logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id
91
92
def create_instance(nova_client,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node='',
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
    """Boot an instance, wait for it to become ACTIVE, attach sg_id.

    Exits the process if the boot fails; otherwise returns the nova
    server object. kwargs may carry 'flavor' (defaults to the common
    config's default flavor).
    """
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        sys.exit(-1)
    # Bugfix: itervalues().next() is py2-only; next(iter(...values()))
    # works on both python 2 and 3.
    logger.debug("Instance '%s' booted successfully. IP='%s'." %
                 (name, next(iter(instance.networks.values()))[0]))

    # Log the human-friendly name when given, else the id.
    secgroup = secgroup_name if secgroup_name else sg_id
    logger.debug("Adding '%s' to security group '%s'..." % (name, secgroup))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)

    return instance
144
145
def generate_ping_userdata(ips_array):
    """Build a cloud-init shell script that pings each given IP forever.

    The IPs become positional shell parameters ('set <ips>'); the script
    then prints 'ping <ip> OK' or 'ping <ip> KO' for each, once per
    second.
    """
    ips = "".join(" %s" % ip for ip in ips_array).replace('  ', ' ')
    template = ("#!/bin/sh\n"
                "set%s\n"
                "while true; do\n"
                " for i do\n"
                "  ip=$i\n"
                "  ping -c 10 $ip 2>&1 >/dev/null\n"
                "  RES=$?\n"
                "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
                "   echo ping $ip OK\n"
                "  else echo ping $ip KO\n"
                "  fi\n"
                " done\n"
                " sleep 1\n"
                "done\n")
    return template % ips
167
168
def generate_userdata_common():
    """Return the cloud-init script that installs the test SSH key pair.

    Prepares /home/cirros/.ssh with an authorized public key and the
    matching private key so test instances can SSH to each other.
    """
    script_lines = [
        "#!/bin/sh\n",
        "sudo mkdir -p /home/cirros/.ssh/\n",
        "sudo chown cirros:cirros /home/cirros/.ssh/\n",
        "sudo chown cirros:cirros /home/cirros/id_rsa\n",
        "mv /home/cirros/id_rsa /home/cirros/.ssh/\n",
        "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
        "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
        "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
        "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
        "cirros@test1>/home/cirros/.ssh/authorized_keys\n",
        "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n",
        "chmod 700 /home/cirros/.ssh\n",
        "chmod 644 /home/cirros/.ssh/authorized_keys\n",
        "chmod 600 /home/cirros/.ssh/id_rsa\n",
    ]
    return "".join(script_lines)
185
186
def generate_userdata_with_ssh(ips_array):
    """Userdata that installs the SSH key and then polls each IP via SSH.

    Combines generate_userdata_common() with a loop that runs 'hostname'
    on every IP and prints either the hostname or 'not reachable'.
    """
    ips = "".join(" %s" % ip for ip in ips_array).replace('  ', ' ')
    ssh_loop = ("#!/bin/sh\n"
                "set%s\n"
                "while true; do\n"
                " for i do\n"
                "  ip=$i\n"
                "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
                "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
                "  RES=$?\n"
                "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
                "  else echo $ip 'not reachable';fi;\n"
                " done\n"
                " sleep 1\n"
                "done\n"
                % ips)
    return generate_userdata_common() + ssh_loop
210
211
def get_installerHandler():
    """Build a deployment handler for the current installer.

    Reads INSTALLER_TYPE (and INSTALLER_IP via get_installer_ip) from
    the environment. Raises ValueError for anything other than 'fuel'
    or 'apex'.
    """
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type == "apex":
        # apex controllers are reached with the root SSH key.
        return DeploymentFactory.get_handler(
            installer_type,
            installer_ip,
            'root',
            pkey_file="/root/.ssh/id_rsa")
    if installer_type == "fuel":
        # fuel uses password authentication.
        return DeploymentFactory.get_handler(
            installer_type,
            installer_ip,
            'root',
            'r00tme')
    raise ValueError("%s is not supported" % installer_type)
233
234
def get_nodes():
    """Return the node list from the current installer's handler."""
    return get_installerHandler().get_nodes()
238
239
def get_installer_ip():
    """Return the installer IP from the INSTALLER_IP env variable."""
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
242
243
def get_instance_ip(instance):
    """Return the first IP of the instance's first network.

    Bugfix: itervalues().next() is py2-only; next(iter(d.values()))
    behaves identically on python 2 and also works on python 3.
    """
    instance_ip = next(iter(instance.networks.values()))[0]
    return instance_ip
247
248
def wait_for_instance(instance):
    """Poll the instance console until a DHCP lease line shows up.

    Returns True once the dhclient lease message appears, False after
    ~80s (40 tries x 2s) without it.
    """
    logger.info("Waiting for instance %s to get a DHCP lease..." % instance.id)
    # The sleep this function replaced waited for 80s
    tries = 40
    sleep_time = 2
    expected_regex = re.compile("Lease of .* obtained, lease time")
    while tries > 0:
        console_log = instance.get_console_output()
        # Bugfix: check before sleeping so a booted instance returns
        # immediately instead of always paying one extra 2s sleep.
        if expected_regex.search(console_log):
            return True
        time.sleep(sleep_time)
        tries -= 1

    logger.error("Instance %s seems to have failed leasing an IP."
                 % instance.id)
    return False
267
268
def wait_for_instances_up(*args):
    """Wait for every given instance to obtain a DHCP lease.

    Every instance is polled (no short-circuit) so each one's wait and
    logging still happens even after an earlier failure.
    """
    results = [wait_for_instance(instance) for instance in args]
    return all(results)
272
273
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    """Poll until net_id appears in the BGPVPN's network list.

    Returns True on success, False after ~30s of polling.
    """
    tries = 30
    sleep_time = 1
    nets = []
    # Bugfix: the ids were swapped in this debug message (it printed the
    # bgpvpn id as the network and vice versa).
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True
290
291
def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    """Return True only if every network id gets associated.

    All ids are checked (no short-circuit) so each wait still runs.
    """
    checks = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id)
              for net_id in args]
    return all(checks)
297
298
def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    """Poll until router_id appears in the BGPVPN's router list.

    Returns True on success, False after ~30s of polling.
    """
    tries = 30
    sleep_time = 1
    routers = []
    # Bugfix: the ids were swapped in this debug message (it printed the
    # bgpvpn id as the router and vice versa).
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True
314
315
def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    """Return True only if every router id gets associated.

    All ids are checked (no short-circuit) so each wait still runs.
    """
    checks = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id)
              for router_id in args]
    return all(checks)
321
322
def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
        TODO: Replace delay with polling logic. '''
    # Fixed 30s settle time between subtests; arguments are accepted and
    # ignored so this can be dropped anywhere in a test sequence.
    time.sleep(30)
327
328
def assert_and_get_compute_nodes(nova_client, required_node_number=2):
    """Get the compute nodes in the deployment

    Exit if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(nova_client)

    num_compute_nodes = len(compute_nodes)
    # Bugfix: honour the required_node_number parameter instead of a
    # hard-coded 2, so callers asking for more nodes are checked too.
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        sys.exit(-1)

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes
344
345
def open_icmp(neutron_client, security_group_id):
    """Ensure an ingress ICMP rule exists on the given security group."""
    # NOTE(review): this branch assumes os_utils.check_security_group_rules
    # returns True when the rule is *missing* (i.e. safe to create) --
    # confirm against functest's openstack_utils before changing the flow.
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'icmp'):

        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'icmp'):
            logger.error("Failed to create icmp security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
360
361
def open_http_port(neutron_client, security_group_id):
    """Ensure an ingress TCP/80 rule exists on the given security group."""
    check_result = os_utils.check_security_group_rules(neutron_client,
                                                       security_group_id,
                                                       'ingress',
                                                       'tcp',
                                                       80, 80)
    if not check_result:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
        return

    created = os_utils.create_secgroup_rule(neutron_client,
                                            security_group_id,
                                            'ingress',
                                            'tcp',
                                            80, 80)
    if not created:
        logger.error("Failed to create http security group rule...")
379
380
def open_bgp_port(neutron_client, security_group_id):
    """Ensure an ingress TCP/179 (BGP) rule exists on the security group."""
    check_result = os_utils.check_security_group_rules(neutron_client,
                                                       security_group_id,
                                                       'ingress',
                                                       'tcp',
                                                       179, 179)
    if not check_result:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
        return

    created = os_utils.create_secgroup_rule(neutron_client,
                                            security_group_id,
                                            'ingress',
                                            'tcp',
                                            179, 179)
    if not created:
        logger.error("Failed to create bgp security group rule...")
397
398
def exec_cmd(cmd, verbose):
    """Run cmd in a shell, capturing combined stdout/stderr.

    Returns (output, success) where success is False when the command
    exits non-zero. If verbose is truthy, the output is also logged.
    """
    success = True
    logger.debug("Executing '%s'" % cmd)
    # Bugfix: universal_newlines makes stdout yield text on both py2 and
    # py3; the previous iter(..., b'') loop concatenated bytes onto a
    # str, which raises TypeError on python 3.
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         universal_newlines=True)
    output = ""
    for line in iter(p.stdout.readline, ''):
        output += line

    if verbose:
        logger.debug(output)

    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success
418
419
def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL Fib for `ip`"""
    url = "http://" + controller_ip + \
          ":8181/restconf/config/odl-fib:fibEntries/"
    logger.debug("Querying '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        return False
    # Bugfix: log the ip being looked up, not the controller address.
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text
433
434
def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell

    This is a bit flimsy because of shell quote escaping, make sure that
    the cmd passed does not have any top level double quotes or this
    function will break.

    The /dev/null is used because client works, but outputs something
    that contains "ERROR" and run_cmd doesn't like that.

    '''
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "{}"'
                 ' 2>/dev/null'.format(cmd))
    return odl_node.run_cmd(karaf_cmd)
449
450
def wait_for_cloud_init(instance):
    """Wait (up to ~10 min) for cloud-init to finish on an instance.

    Scans the console output for either a module failure or the
    'finished at' marker. Returns True on success, False on failure or
    timeout.
    """
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
    while tries > 0:
        instance_log = instance.get_console_output()
        if "Failed to run module" in instance_log:
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            success = True
            break
        time.sleep(sleep_time)
        tries = tries - 1
    else:
        # Every retry was exhausted without seeing a terminal marker.
        logger.error("Cloud init timed out"
                     ". Reason: %s",
                     instance_log)
        success = False
    logger.info("Finished waiting for cloud init of instance {} result was {}"
                "".format(instance.name, success))
    return success
479
480
def attach_instance_to_ext_br(instance, compute_node):
    """Plug an instance's libvirt domain into the external bridge.

    On fuel the domain is attached directly to br-ex; on apex a linux
    bridge (br-quagga) is first created and patched into br-ex with a
    veth pair, because virsh cannot attach to an ovs bridge directly.
    Bugfix: an unsupported INSTALLER_TYPE now raises ValueError instead
    of failing later with a NameError on the unbound 'bridge' variable.
    """
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        set -e
        if ! sudo brctl show |grep -q ^{bridge};then
          sudo brctl addbr {bridge}
          sudo ip link set {bridge} up
          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
          sudo ip link set dev ovs-quagga-tap up
          sudo ip link set dev quagga-tap up
          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
          sudo brctl addif {bridge} quagga-tap
        fi
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))
    else:
        raise ValueError("%s is not supported" % installer_type)

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))
508
509
def detach_instance_from_ext_br(instance, compute_node):
    """Undo attach_instance_to_ext_br for an instance.

    Detaches the 52:54:* interface from the libvirt domain and, on
    apex, tears down the br-quagga/veth plumbing again. On fuel no
    extra teardown is needed beyond the detach.
    """
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    # Bugfix: raw string -- '\d' in a normal literal is an invalid
    # escape (DeprecationWarning/SyntaxWarning on python 3); the byte
    # value of the command is unchanged.
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               r"grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
            sudo brctl delif {bridge} quagga-tap &&
            sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
            sudo ip link set dev quagga-tap down &&
            sudo ip link set dev ovs-quagga-tap down &&
            sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
            sudo ip link set {bridge} down &&
            sudo brctl delbr {bridge}
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))
539
540
def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
                    subnet_ids, router_ids, network_ids):
    """Delete the given neutron resources in dependency order.

    Returns False as soon as a floating ip, subnet, router or network
    fails to delete. Interface and gateway removal failures are only
    logged, and the remaining teardown still runs.
    """

    # Failures here abort and report False to the caller.
    if len(floatingip_ids) != 0:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
                logging.error('Fail to delete all floating ips. '
                              'Floating ip with id {} was not deleted.'.
                              format(floatingip_id))
                return False

    if len(bgpvpn_ids) != 0:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)

    # NOTE(review): interface and gateway removal failures do not abort
    # -- presumably deliberate best-effort so routers/networks below
    # still get a deletion attempt; confirm before tightening.
    if len(interfaces) != 0:
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(neutron_client,
                                                    router_id, subnet_id):
                logging.error('Fail to delete all interface routers. '
                              'Interface router with id {} was not deleted.'.
                              format(router_id))

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(neutron_client, router_id):
                logging.error('Fail to delete all gateway routers. '
                              'Gateway router with id {} was not deleted.'.
                              format(router_id))

    if len(subnet_ids) != 0:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
                logging.error('Fail to delete all subnets. '
                              'Subnet with id {} was not deleted.'.
                              format(subnet_id))
                return False

    # router_ids is iterated twice on purpose: gateways are removed
    # above, the routers themselves are deleted here.
    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(neutron_client, router_id):
                logging.error('Fail to delete all routers. '
                              'Router with id {} was not deleted.'.
                              format(router_id))
                return False

    if len(network_ids) != 0:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(neutron_client, network_id):
                logging.error('Fail to delete all networks. '
                              'Network with id {} was not deleted.'.
                              format(network_id))
                return False
    return True
595
596
def cleanup_nova(nova_client, instance_ids, image_ids):
    """Delete the given nova instances and glance images.

    Returns False on the first deletion that fails, True otherwise.
    """
    for instance_id in instance_ids:
        if not os_utils.delete_instance(nova_client, instance_id):
            logging.error('Fail to delete all instances. '
                          'Instance with id {} was not deleted.'.
                          format(instance_id))
            return False

    for image_id in image_ids:
        if not os_utils.delete_glance_image(nova_client, image_id):
            logging.error('Fail to delete all images. '
                          'Image with id {} was not deleted.'.
                          format(image_id))
            return False
    return True
614
615
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN from the given attributes.

    Typical kwargs include route_distinguishers and route_targets.
    """
    return neutron_client.create_bgpvpn({"bgpvpn": kwargs})
621
622
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update the BGPVPN identified by bgpvpn_id with the given attributes."""
    return neutron_client.update_bgpvpn(bgpvpn_id, {"bgpvpn": kwargs})
626
627
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN identified by bgpvpn_id."""
    return neutron_client.delete_bgpvpn(bgpvpn_id)
630
631
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Return neutron's 'show' payload ({'bgpvpn': {...}}) for bgpvpn_id."""
    bgpvpn_data = neutron_client.show_bgpvpn(bgpvpn_id)
    return bgpvpn_data
634
635
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the router ids currently associated with the BGPVPN."""
    bgpvpn_info = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn_info['bgpvpn']['routers']
638
639
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the network ids currently associated with the BGPVPN."""
    bgpvpn_info = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn_info['bgpvpn']['networks']
642
643
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate router_id with the given BGPVPN."""
    body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, body)
647
648
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate neutron_network_id with the given BGPVPN."""
    body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, body)
652
653
def is_fail_mode_secure():
    """
    Checks the value of the attribute fail_mode,
    if it is set to secure. This check is performed
    on all OVS br-int interfaces, for all OpenStack nodes.
    """
    is_secure = {}
    bridge_list_cmd = ("sudo ovs-vsctl show | "
                       "grep -i bridge | "
                       "awk '{print $2}'")
    # Define OVS get fail_mode command
    fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in get_nodes():
        # Inactive nodes cannot be queried; skip them.
        if not openstack_node.is_active():
            continue

        bridges = openstack_node.run_cmd(bridge_list_cmd).strip().split('\n')
        if 'br-int' not in bridges:
            continue

        # Execute get fail_mode command
        fail_mode = openstack_node.run_cmd(fail_mode_cmd).strip()
        if fail_mode == 'secure':
            is_secure[openstack_node.name] = True
        else:
            logging.error('The fail_mode for br-int was not secure '
                          'in {} node'.format(openstack_node.name))
            is_secure[openstack_node.name] = False
    return is_secure