e32485366a8787365e779731a26e57d6cbb60f29
[apex.git] / apex / overcloud / overcloud_deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import re
15 import shutil
16 import uuid
17 import struct
18 import time
19
20 from apex.common import constants as con
21 from apex.common.exceptions import ApexDeployException
22 from apex.common import parsers
23 from apex.virtual import utils as virt_utils
24 from cryptography.hazmat.primitives import serialization as \
25     crypto_serialization
26 from cryptography.hazmat.primitives.asymmetric import rsa
27 from cryptography.hazmat.backends import default_backend as \
28     crypto_default_backend
29
30
# Maps an SDN controller name from deploy settings (or False for no SDN
# controller) to the tripleo-heat-templates environment file(s) enabling it.
# Values may be a plain file name, a nested dict keyed on further deploy
# options, or a tuple of (required option value, file name).
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

# Boolean feature toggles in deploy options mapped to the environment file
# that enables the corresponding service
OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

# Heat parameter name -> key within the 'ovs' performance section of the
# deploy settings (consumed in prep_env when dataplane is ovs_dpdk)
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

# RPM artifacts referenced by prep_image; presumably staged onto the
# overcloud image under /root by earlier build steps — confirm with build
OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"
72
73
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files for the deploy command.

    Recursively walks sdn_map collecting tripleo-heat-templates environment
    files that match the given deploy settings.  If nothing matches, the
    map's 'default' entry is used (a warning is logged when absent).

    :param ds: deploy settings (dict of deploy options)
    :param sdn_map: map of options to env files (e.g. SDN_FILE_MAP)
    :param env_list: internal accumulator used during recursion
    :return: list of environment file paths under the THT env directory
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # nested map: resolve against the same deploy settings
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
            # tuple is (required option value, env file); use get() so a
            # missing option skips the entry instead of raising KeyError
            if ds.get(k) == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
94
95
96 def _get_node_counts(inventory):
97     """
98     Return numbers of controller and compute nodes in inventory
99
100     :param inventory: node inventory data structure
101     :return: number of controller and compute nodes in inventory
102     """
103     if not inventory:
104         raise ApexDeployException("Empty inventory")
105
106     nodes = inventory['nodes']
107     num_control = 0
108     num_compute = 0
109     for node in nodes:
110         if node['capabilities'] == 'profile:control':
111             num_control += 1
112         elif node['capabilities'] == 'profile:compute':
113             num_compute += 1
114         else:
115             # TODO(trozet) do we want to allow capabilities to not exist?
116             logging.error("Every node must include a 'capabilities' key "
117                           "tagged with either 'profile:control' or "
118                           "'profile:compute'")
119             raise ApexDeployException("Node missing capabilities "
120                                       "key: {}".format(node))
121     return num_control, num_compute
122
123
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml'):
    """
    Builds the 'openstack overcloud deploy' command string.

    Collects the environment files implied by the deploy options, validates
    node counts from the inventory, writes the resulting command to
    <tmp_dir>/deploy_command and returns it.

    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: directory for deployment artifacts
    :param virtual: True for a virtual deployment, False for baremetal
    :param env_file: base OPNFV environment file included first
    :return: deploy command string
    :raises ApexDeployException: when control or compute count is zero
    """
    logging.info("Creating deployment command")
    deploy_options = [env_file, 'network-environment.yaml']
    ds_opts = ds['deploy_options']
    # SDN-specific env files derived from deploy options
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    # optional feature env files (tacker, congress, ...)
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        # storage env file must be prepped (FSID/keys) before it is included
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = _get_node_counts(inv)
    if num_control == 0 or num_compute == 0:
        # fixed log message: was missing ': ' before the compute count
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        # without HA only a single controller is deployed
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
170
171
def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.

    Builds a list of virt-customize operations based on the deploy options,
    copies the image to tmp_dir as overcloud-full.qcow2 and applies the
    operations to that copy (the original image is left untouched).
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    :raises ApexDeployException: if the source image file does not exist
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        # when an SDN controller is used the ovs agent's unit files are
        # deleted from the image so the service can never start
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"})
        logging.info("ZRPC and Quagga enabled")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        # write a modprobe script per module and upload it into the image's
        # /etc/sysconfig/modules so the module loads at boot
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
                # NOTE(review): close() is redundant inside the with block
                fh.close()

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        # install the NSH-capable OVS kmod and downgrade userspace OVS to
        # the matching staged RPM version
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            # no-SDN fdio scenario uses the staged VPP RPMs instead of the
            # vpp-lib already on the image
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    if sdn == 'opendaylight':
        # non-default ODL version: replace the installed opendaylight and
        # its puppet module with the staged version-specific artifacts
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        # NOTE(review): this elif belongs to the odl_version check above, so
        # the netvirt-vpp swap only runs when odl_version IS the default; the
        # 'sdn == opendaylight' test here is redundant (already true in this
        # branch).  Confirm whether this was meant to be an elif of the
        # outer 'if sdn == opendaylight'.
        elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
                and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        # update then downgrade against the RPMs staged in /root/ovs28 —
        # presumably pins openvswitch to that staged version; confirm intent
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    # apply all collected operations to the copied image
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
296
297
def make_ssh_key():
    """
    Creates public and private ssh keys with 2048 bit RSA encryption

    2048 bits replaces the previous 1024: 1024-bit RSA keys are below the
    currently recommended minimum key size and are rejected/deprecated by
    modern SSH implementations.
    :return: private, public key as utf-8 strings
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=2048
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    # decode to str so the keys can be embedded directly into env files
    return private_key.decode('utf-8'), public_key.decode('utf-8')
318
319
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment

    Copies the opnfv environment file into tmp_dir and rewrites it (and the
    network environment file in place) line by line based on deploy/network
    settings: cloud domain, generated SSH keys, SDN/dataplane specific
    parameters and performance options.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_ctrl_nic = tenant_nic_map['controller']['members'][0]
    tenant_comp_nic = tenant_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputing yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            # use a dedicated loop variable so the outer fileinput 'line' is
            # not clobbered by the key body for the checks further below
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
            elif 'ControllerExtraConfig' in line:
                # fixed misplaced quote: the YAML list element must be
                # quoted inside the brackets ('[...']' -> ['...'])
                output_line = ("  ControllerExtraConfig:\n    "
                               "tripleo::profile::base::neutron::agents::"
                               "honeycomb::interface_role_mapping:"
                               " ['{}:tenant-interface']"
                               .format(tenant_ctrl_nic))
            elif 'NovaComputeExtraConfig' in line:
                # fixed misplaced quote (same as ControllerExtraConfig above)
                output_line = ("  NovaComputeExtraConfig:\n    "
                               "tripleo::profile::base::neutron::agents::"
                               "honeycomb::interface_role_mapping:"
                               " ['{}:tenant-interface']"
                               .format(tenant_comp_nic))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_ctrl_nic))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = _get_node_counts(inv)
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if 'ComputeKernelArgs' in line and perf_kern_comp:
                kernel_args = ''
                for k, v in perf_kern_comp.items():
                    kernel_args += "{}={} ".format(k, v)
                if kernel_args:
                    output_line = "  ComputeKernelArgs: '{}'".\
                        format(kernel_args)
            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))

    # Modify Network environment
    for line in fileinput.input(net_env, inplace=True):
        line = line.strip('\n')
        if 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            print('  OS::TripleO::ComputeExtraConfigPre: '
                  './ovs-dpdk-preconfig.yaml')
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            print("resource_registry:\n"
                  "  OS::TripleO::NodeUserData: first-boot.yaml")
        elif perf and perf_kern_comp and \
                'NovaSchedulerDefaultFilters' in line:
            print("  NovaSchedulerDefaultFilters: 'RamFilter,"
                  "ComputeFilter,AvailabilityZoneFilter,"
                  "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
                  "NUMATopologyFilter'")
        else:
            print(line)

    logging.info("network-environment file written to {}".format(net_env))
478
479
def generate_ceph_key():
    """
    Generates a random cephx-style secret.

    The result is the base64 encoding of a little-endian packed header
    (version 1, creation timestamp, a zero field, secret length) followed
    by 16 random bytes.
    :return: base64-encoded key as bytes
    """
    secret = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(secret))
    return base64.b64encode(header + secret)
484
485
def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: directory holding storage-environment.yaml
    :return: None
    :raises ApexDeployException: when the storage environment file is missing
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    # rewrite in place, injecting a fresh cluster FSID and generated keys
    for raw_line in fileinput.input(storage_file, inplace=True):
        cur = raw_line.strip('\n')
        if 'CephClusterFSID' in cur:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in cur:
            print("  CephMonKey: {}".format(
                generate_ceph_key().decode('utf-8')))
        elif 'CephAdminKey' in cur:
            print("  CephAdminKey: {}".format(
                generate_ceph_key().decode('utf-8')))
        else:
            print(cur)
    if ds_opts.get('ceph_device'):
        # expose the target OSD device via hiera ExtraConfig
        with open(storage_file, 'a') as env_fh:
            env_fh.write('  ExtraConfig:\n')
            env_fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
520
521
def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    external = 'external' in ns.enabled_network_list
    if external:
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        # no external network: fall back to the admin network and its
        # introspection range for the allocation pool
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    # native vlan maps to a flat provider network; otherwise tag with the id
    vlan = nic_config['compute']['vlan']
    if vlan == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(vlan)
    cmds = [
        "openstack network create external --project service "
        "--external --provider-network-type {} "
        "--provider-physical-network datacentre".format(ext_type)
    ]
    cidr = net_config['cidr']
    subnet_cmd = ("openstack subnet create external-subnet --project "
                  "service --network external --no-dhcp --gateway {} "
                  "--allocation-pool start={},end={} --subnet-range "
                  "{}".format(gateway, pool_start, pool_end, str(cidr)))
    if external and cidr.version == 6:
        subnet_cmd += (' --ip-version 6 --ipv6-ra-mode slaac '
                       '--ipv6-address-mode slaac')
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
562
563
def create_congress_cmds(overcloud_file):
    """
    Generates congress datasource creation commands from an overcloudrc file.
    :param overcloud_file: path to the overcloudrc file to parse
    :return: list of congress datasource command strings
    :raises KeyError: if required OS_* variables are missing from overcloudrc
    """
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    config_args = '--config ' + ' --config '.join(ds_cfg)

    cmds = []
    for driver in drivers:
        # the doctor datasource takes no credential config
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, config_args)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds