Merge "Adding L2GW Scenario"
[apex.git] / apex / overcloud / deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import shutil
import uuid
import struct
import time

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


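# Maps an SDN controller and its optional features to the
# tripleo-heat-templates environment files that enable them.  A key of False
# covers no-SDN (ML2) deployments, and a (value, env_file) tuple matches when
# the deploy setting equals that value rather than True (see
# build_sdn_env_list below).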
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

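# Maps tripleo-heat-templates parameter names to the corresponding 'ovs'
# performance option keys in deploy settings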
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"


def build_sdn_env_list(ds, sdn_map, env_list=None):
76     """
77     Builds a list of SDN environment files to be used in the deploy cmd.
78
79     This function recursively searches an sdn_map.  First the sdn controller is
80     matched and then the function looks for enabled features for that
81     controller to determine which environment files should be used.  By
82     default the feature will be checked if set to true in deploy settings to be
83     added to the list.  If a feature does not have a boolean value, then the
84     key and value pair to compare with are checked as a tuple (k,v).
85
86     :param ds: deploy settings
87     :param sdn_map: SDN map to recursively search
88     :param env_list: recursive var to hold previously found env_list
89     :return: A list of env files
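
    Example (illustrative sketch, OpenDaylight with BGPVPN enabled):

        build_sdn_env_list({'sdn_controller': 'opendaylight', 'vpn': True},
                           SDN_FILE_MAP)
        # -> [<THT_ENV_DIR>/neutron-opendaylight.yaml,
        #     <THT_ENV_DIR>/neutron-bgpvpn-opendaylight.yaml]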
90     """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        # check if the value is not a boolean
        elif isinstance(v, tuple):
            if ds[k] == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    if env_file:
        deploy_options.append(env_file)
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # It is unclear whether this is due to the hardware the OOO support
        # was developed on or to the virtualization support in CentOS.
        # Either way it will likely improve as aarch64 support matures in
        # CentOS, so deploy time should be re-tested in the future and this
        # multiplier removed.
        con.DEPLOY_TIMEOUT *= 2
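    # The assembled command takes this general shape (illustrative values):
    #   openstack overcloud deploy --templates --timeout 90 \
    #     -e network-environment.yaml -e opnfv-environment.yaml ... \
    #     --ntp-server <ntp> --control-scale 1 --compute-scale 2 \
    #     --control-flavor control --compute-flavor compute \
    #     --libvirt-type kvm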
164     cmd = "openstack overcloud deploy --templates --timeout {} " \
165           .format(con.DEPLOY_TIMEOUT)
166     # build cmd env args
167     for option in deploy_options:
168         cmd += " -e {}".format(option)
169     cmd += " --ntp-server {}".format(ns['ntp'][0])
170     cmd += " --control-scale {}".format(num_control)
171     cmd += " --compute-scale {}".format(num_compute)
172     cmd += ' --control-flavor control --compute-flavor compute'
173     if net_data:
174         cmd += ' --networks-file network_data.yaml'
175     libvirt_type = 'kvm'
176     if virtual:
177         with open('/sys/module/kvm_intel/parameters/nested') as f:
178             nested_kvm = f.read().strip()
179             if nested_kvm != 'Y':
180                 libvirt_type = 'qemu'
181     cmd += ' --libvirt-type {}'.format(libvirt_type)
182     logging.info("Deploy command set: {}".format(cmd))
183
184     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
185         fh.write(cmd)
186     return cmd
187
188
def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates the SDN image and preps it for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize the logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
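    # virt_cmds is a list of single-entry dicts, each mapping a
    # virt-customize operation (con.VIRT_RUN_CMD, con.VIRT_UPLOAD,
    # con.VIRT_PW) to its argument; virt_utils.virt_customize() applies
    # them to the overcloud image at the end of this function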
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Disabling neutron-openvswitch-agent on image")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start on boot")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add a generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install {}".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")


def make_ssh_key():
    """
    Creates a public/private SSH key pair using 1024-bit RSA
    :return: private key, public key (as strings)
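
    Example (illustrative):

        private_key, public_key = make_ssh_key()
        # private_key is a PEM-encoded PKCS8 block;
        # public_key is a single 'ssh-rsa ...' line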
332     """
333     key = rsa.generate_private_key(
334         backend=crypto_default_backend(),
335         public_exponent=65537,
336         key_size=1024
337     )
338
339     private_key = key.private_bytes(
340         crypto_serialization.Encoding.PEM,
341         crypto_serialization.PrivateFormat.PKCS8,
342         crypto_serialization.NoEncryption())
343     public_key = key.public_key().public_bytes(
344         crypto_serialization.Encoding.OpenSSH,
345         crypto_serialization.PublicFormat.OpenSSH
346     )
347     return private_key.decode('utf-8'), public_key.decode('utf-8')
348
349
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and output YAML rather than parsing
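    # NOTE: fileinput with inplace=True redirects stdout to the file being
    # read, so each print(output_line) below rewrites that line of the file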
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


def generate_ceph_key():
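    """
    Generates a cephx-style secret: 16 random bytes base64-encoded behind a
    little-endian binary header whose fields are (assumed cephx layout) key
    type, creation time in seconds, nanoseconds, and key length.
    :return: base64-encoded key as bytes
    """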
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
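
    Example output (illustrative, assuming a native-VLAN external network
    172.30.9.0/24):

        openstack network create external --project service --external \
            --provider-network-type flat \
            --provider-physical-network datacentre
        openstack subnet create external-subnet --project service \
            --network external --no-dhcp --gateway 172.30.9.1 \
            --allocation-pool start=172.30.9.2,end=172.30.9.100 \
            --subnet-range 172.30.9.0/24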
564     """
565     if 'external' in ns.enabled_network_list:
566         net_config = ns['networks']['external'][0]
567         external = True
568         pool_start, pool_end = net_config['floating_ip_range']
569     else:
570         net_config = ns['networks']['admin']
571         external = False
572         pool_start, pool_end = ns['apex']['networks']['admin'][
573             'introspection_range']
574     nic_config = net_config['nic_mapping']
575     gateway = net_config['gateway']
576     cmds = list()
577     # create network command
578     if nic_config['compute']['vlan'] == 'native':
579         ext_type = 'flat'
580     else:
581         ext_type = "vlan --provider-segment {}".format(nic_config[
582                                                        'compute']['vlan'])
583     cmds.append("openstack network create external --project service "
584                 "--external --provider-network-type {} "
585                 "--provider-physical-network datacentre".format(ext_type))
586     # create subnet command
587     cidr = net_config['cidr']
588     subnet_cmd = "openstack subnet create external-subnet --project " \
589                  "service --network external --no-dhcp --gateway {} " \
590                  "--allocation-pool start={},end={} --subnet-range " \
591                  "{}".format(gateway, pool_start, pool_end, str(cidr))
592     if external and cidr.version == 6:
593         subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
594                       '--ipv6-address-mode slaac'
595     cmds.append(subnet_cmd)
596     logging.debug("Neutron external network commands determined "
597                   "as: {}".format(cmds))
598     return cmds
599
600
def create_congress_cmds(overcloud_file):
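    """
    Creates Congress datasource commands from overcloudrc credentials.

    Each returned string takes the form (illustrative):
        nova "nova" --config username=<user> --config tenant_name=<project>
            --config password=<pw> --config auth_url=<url>
            --config api_version="2.34"
    :param overcloud_file: path to overcloudrc file
    :return: list of Congress datasource command strings
    """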
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds