##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import shutil
import struct
import time
import uuid

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the SDN controller
    is matched, and then the function looks for enabled features of that
    controller to determine which environment files should be used.  By
    default a feature is added to the list if it is set to true in the
    deploy settings.  If a feature's map value is not a boolean, it is
    treated as a (value, env_file) tuple, and the env file is added when the
    deploy setting for that key matches the given value.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
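
    Example (illustrative; exact paths depend on con.THT_ENV_DIR)::

        >>> opts = {'sdn_controller': 'opendaylight', 'sfc': True}
        >>> build_sdn_env_list(opts, SDN_FILE_MAP)  # doctest: +SKIP
        ['.../neutron-opendaylight.yaml', '.../neutron-sfc-opendaylight.yaml']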
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        # a non-boolean value is a (value, env_file) tuple
        elif isinstance(v, tuple):
            if ds[k] == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):
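    """
    Creates the overcloud deploy command, writes it to tmp_dir/deploy_command
    and returns it.

    The resulting command looks roughly like the following (illustrative
    values; emitted as a single line in practice)::

        openstack overcloud deploy --templates --timeout 90
            -e network-environment.yaml -e opnfv-environment.yaml ...
            --ntp-server 0.pool.ntp.org --control-scale 3 --compute-scale 2
            --control-flavor control --compute-flavor compute
            --libvirt-type kvm
    """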
    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    if env_file:
        deploy_options.append(env_file)
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    # use a local timeout so the shared constant is not mutated across calls
    deploy_timeout = con.DEPLOY_TIMEOUT
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS.  Either way it should improve over time as the aarch64
        # support matures in CentOS, and deploy time should be re-tested in
        # the future so this multiplier can be removed.
        deploy_timeout *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(deploy_timeout)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        # fall back to qemu if nested KVM is unavailable; the kvm_intel
        # parameter file only exists on Intel hosts, so treat its absence
        # (e.g. on aarch64) as no nested virt support
        nested_params = '/sys/module/kvm_intel/parameters/nested'
        if os.path.isfile(nested_params):
            with open(nested_params) as f:
                nested_kvm = f.read().strip()
        else:
            nested_kvm = 'N'
        if nested_kvm != 'Y':
            libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
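    # virt_cmds collects image customization steps as single-key dicts,
    # e.g. {con.VIRT_RUN_CMD: '...'} or {con.VIRT_PW: 'password:...'},
    # which are applied by virt_utils.virt_customize() at the end of this
    # function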
    sdn = ds_opts['sdn_controller']
    # remove the neutron-openvswitch-agent service files; needed due to
    # rhbz #1436021, fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start at boot")
    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                # ODL_NETVIRT_VPP_RPM is already a full path to the rpm
                {con.VIRT_RUN_CMD: "yum -y install {}".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")


def make_ssh_key():
    """
    Creates a public/private ssh key pair using 1024-bit RSA
    :return: private key, public key (utf-8 strings)
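
    Example (illustrative)::

        priv, pub = make_ssh_key()
        assert pub.startswith('ssh-rsa ')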
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
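
    Template lines in opnfv_env are rewritten in place; for example a line
    containing 'CloudDomain' becomes (illustrative domain)::

        CloudDomain: opnfv.example.com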
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and output yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            # use a distinct loop variable so the outer 'line' needed by the
            # checks below is not clobbered
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


def generate_ceph_key():
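    """
    Generates a random cephx-style secret: a packed binary header (key type,
    creation time, key length) followed by 16 random bytes, base64 encoded.

    The resulting 40-character string carries the 'AQ' prefix familiar from
    cephx keys (it encodes the packed type field).
    """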
    key = os.urandom(16)
    # little-endian: type (1), creation secs, creation nsecs, key length
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
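
    With deploy option ceph_device set, the file gains an ExtraConfig block
    like (illustrative device)::

        ExtraConfig:
          ceph::profile::params::osds: {/dev/sdb: {}}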
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            # space after the colon so the value parses as a YAML flow
            # mapping rather than as part of a plain scalar
            fh.write("    ceph::profile::params::osds: {{{}: {{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
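
    Example output (illustrative values; each command is returned as a
    single line)::

        openstack network create external --project service --external
            --provider-network-type flat
            --provider-physical-network datacentre
        openstack subnet create external-subnet --project service
            --network external --no-dhcp --gateway 172.30.9.1
            --allocation-pool start=172.30.9.200,end=172.30.9.220
            --subnet-range 172.30.9.0/24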
    """
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(
            nic_config['compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network datacentre".format(ext_type))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
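    """
    Generates congress datasource configuration commands from an overcloudrc.

    Each returned string is meant to be fed to a congress datasource create
    call, e.g. (illustrative values)::

        nova "nova" --config username=admin --config tenant_name=admin
            --config password=secret --config auth_url=http://1.2.3.4:5000/v3
            --config api_version="2.34"
    """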
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds