Merge "Fixes including default SDN env file in deploy cmd"
[apex.git] / apex / overcloud / deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import shutil
15 import uuid
16 import struct
17 import time
18
19 from apex.common import constants as con
20 from apex.common.exceptions import ApexDeployException
21 from apex.common import parsers
22 from apex.virtual import utils as virt_utils
23 from cryptography.hazmat.primitives import serialization as \
24     crypto_serialization
25 from cryptography.hazmat.primitives.asymmetric import rsa
26 from cryptography.hazmat.backends import default_backend as \
27     crypto_default_backend
28
29
# Maps the SDN controller (value of the 'sdn_controller' deploy option) to the
# tripleo-heat-templates environment file(s) that enable it.  Nested dicts are
# walked recursively by build_sdn_env_list(): a key names a feature in the
# deploy settings (its file is included when the setting is True), a
# (value, file) tuple means "include file when the setting equals value", and
# the 'default' entry is appended when the controller matches but no feature
# does.  The False key covers deployments without an SDN controller.
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

# Optional services enabled via a dedicated environment file when the deploy
# option of the same name is truthy (see create_deploy_cmd()).
OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

# Maps tripleo heat parameter names to the corresponding OVS/DPDK performance
# option keys in the deploy settings (consumed by prep_env()).
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

# NSH-capable OVS RPMs installed into the overcloud image for SFC with the
# plain OVS dataplane (see prep_image()).
OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
# ODL netvirt/VPP RPM pre-staged inside the overcloud image
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"
72
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the sdn controller is
    matched and then the function looks for enabled features for that
    controller to determine which environment files should be used.  By
    default the feature will be checked if set to true in deploy settings to be
    added to the list.  If a feature does not have a boolean value, then the
    key and value pair to compare with are checked as a tuple (k,v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        # check if the value is not a boolean
        elif isinstance(v, tuple):
            # (setting value, env file): include the file only when the
            # deploy setting for this key equals the given value.  Use
            # .get() so a key missing from the settings is simply skipped
            # instead of raising KeyError.
            if ds.get(k) == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if not env_list:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
116
117
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):
    """
    Builds the 'openstack overcloud deploy' command and writes it to
    tmp_dir/deploy_command.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: Apex tmp dir to write the deploy_command file into
    :param virtual: True for a virtual deployment
    :param env_file: opnfv environment file to include (skipped when falsy)
    :param net_data: True to pass a network_data.yaml to the deploy
    :return: the deploy command string
    :raises ApexDeployException: when the inventory contains 0 control or
        0 compute nodes
    """
    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    if env_file:
        deploy_options.append(env_file)
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    # optional feature env files (tacker, congress, ...)
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control, num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        # non-HA deployments only ever use a single controller
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        # Use qemu emulation when nested KVM is unavailable.  The kvm_intel
        # parameter file does not exist on non-Intel hosts (e.g. kvm_amd),
        # so treat a missing file as "no nested KVM" instead of crashing.
        nested_kvm = ''
        try:
            with open('/sys/module/kvm_intel/parameters/nested') as f:
                nested_kvm = f.read().strip()
        except IOError:
            pass
        if nested_kvm != 'Y':
            libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
177
178
def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.

    Builds a list of virt-customize operations based on the deploy settings
    (SDN controller, dataplane, enabled features) and applies them to a
    temporary copy of the overcloud image placed in tmp_dir.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    :raises ApexDeployException: when the source image file is missing
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        # zrpcd is needed for BGPVPN; arrange for it to start at boot
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            # the 'with' block closes the file; the explicit close() that was
            # here was redundant
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        # SFC on plain OVS requires the NSH-enabled OVS build
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            # swap in the requested puppet-opendaylight/ODL version
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        # NOTE: only reached when odl_version is the default; sdn is already
        # known to be 'opendaylight' in this scope so re-checking it (as the
        # original code did) was redundant
        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        # force the pre-staged OVS 2.8 packages needed for OVN
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
316
317
def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    # generate the RSA key pair (exponent/size match the original contract)
    rsa_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=1024,
        backend=crypto_default_backend()
    )

    # serialize: PEM/PKCS8 for the private half, OpenSSH for the public half
    priv_pem = rsa_key.private_bytes(
        encoding=crypto_serialization.Encoding.PEM,
        format=crypto_serialization.PrivateFormat.PKCS8,
        encryption_algorithm=crypto_serialization.NoEncryption()
    )
    pub_openssh = rsa_key.public_key().public_bytes(
        encoding=crypto_serialization.Encoding.OpenSSH,
        format=crypto_serialization.PublicFormat.OpenSSH
    )
    return priv_pem.decode('utf-8'), pub_openssh.decode('utf-8')
338
339
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment.

    Copies the opnfv environment file into tmp_dir and rewrites it in place
    line-by-line: injects generated SSH keys, domain name, SDN/dataplane
    specific parameters, and performance (VPP/OVS-DPDK/kernel) options from
    the deploy settings.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
        (NOTE(review): not referenced in this function -- confirm intent)
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    # work on a copy of the opnfv env file inside tmp_dir
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    # first member of each role's nic mapping is the nic used for the
    # tenant/external networks
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    # fileinput with inplace=True redirects print() back into the file
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            # inject the generated private key as an indented YAML block
            output_line = "    private_key: |\n"
            key_out = ''
            # NOTE(review): this inner loop rebinds 'line'; the later
            # independent 'if' blocks in this same iteration then compare
            # against the last key line -- harmless in practice since key
            # text will not match those markers, but confirm before relying
            # on it
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            # first-boot script needed for kernel args / rt_kvm setups
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        # SDN/dataplane specific substitutions (independent of the chain
        # above; may overwrite output_line)
        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            # with ODL DVR the DHCP agent moves to the compute nodes
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            # per-role VPP performance options
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        # honeycomb needs to know which nic plays which role
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            # OVS-DPDK tuning parameters (see OVS_PERF_MAP)
            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        # with inplace=True this writes the (possibly rewritten) line back
        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
502
503
def generate_ceph_key():
    """Return a base64-encoded cephx key: binary header + 16 random bytes."""
    secret = os.urandom(16)
    # header: version 1, creation timestamp, zero nsec, secret length
    hdr = struct.pack('<hiih', 1, int(time.time()), 0, len(secret))
    return base64.b64encode(hdr + secret)
508
509
def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir holding storage-environment.yaml
    :return:
    :raises ApexDeployException: when the storage env file is missing
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    # rewrite in place, substituting per-deployment ceph secrets
    for raw_line in fileinput.input(storage_file, inplace=True):
        stripped = raw_line.strip('\n')
        if 'CephClusterFSID' in stripped:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in stripped:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in stripped:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(stripped)
    # append OSD mapping when a ceph device is configured
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
545
def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    # pick the real external network when enabled, otherwise fall back to
    # the admin network's introspection range
    external = 'external' in ns.enabled_network_list
    if external:
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    # flat provider network on native vlan, otherwise a tagged vlan segment
    compute_vlan = nic_config['compute']['vlan']
    if compute_vlan == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(compute_vlan)
    cmds = ["openstack network create external --project service "
            "--external --provider-network-type {} "
            "--provider-physical-network datacentre".format(ext_type)]
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
586
587
def create_congress_cmds(overcloud_file):
    """
    Builds the congress datasource creation commands from overcloudrc.
    :param overcloud_file: path to the overcloudrc credentials file
    :return: list of congress datasource command strings
    :raises KeyError: when overcloudrc lacks a required OS_* variable
    """
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    config_args = '--config ' + ' --config '.join(ds_cfg)

    cmds = list()
    for driver in drivers:
        # the doctor driver takes no credential config
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, config_args)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds