apex/overcloud/deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import shutil
import uuid
import struct
import time

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}
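
# Resolution sketch for the map above: a plain string names a THT
# environment file for that SDN controller, a nested dict is matched again
# against the deploy options, and a (value, file) tuple matches when the
# named option equals the value.  With hypothetical options
# {'sdn_controller': False, 'dataplane': 'ovs_dpdk'}, build_sdn_env_list()
# below resolves to 'neutron-ovs-dpdk.yaml'.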

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"


def build_sdn_env_list(ds, sdn_map, env_list=None):
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
            if ds[k] == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
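
# Usage sketch for build_sdn_env_list() (hypothetical deploy options; real
# paths depend on con.THT_ENV_DIR):
#
#   ds_opts = {'sdn_controller': 'opendaylight', 'sfc': True,
#              'vpn': False, 'dataplane': 'ovs'}
#   build_sdn_env_list(ds_opts, SDN_FILE_MAP)
#   # -> [os.path.join(con.THT_ENV_DIR, 'neutron-sfc-opendaylight.yaml')]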


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    if env_file:
        deploy_options.append(env_file)
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes. Control "
                      "nodes: {}, compute nodes: {}".format(num_control,
                                                            num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
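
# Shape of the generated command (values are illustrative; the environment
# list and timeout come from the deploy settings and constants):
#
#   openstack overcloud deploy --templates --timeout <DEPLOY_TIMEOUT> \
#     --libvirt-type kvm -e network-environment.yaml \
#     -e opnfv-environment.yaml -e <THT_ENV_DIR>/neutron-opendaylight.yaml \
#     --ntp-server <ntp> --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute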


def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Disabling neutron openvswitch-agent")
        virt_cmds.extend([
            {con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"}
        ])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start at boot")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic Linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        # sdn is already known to be 'opendaylight' here, so only the
        # odl_vpp_netvirt option needs checking; the RPM constant is an
        # absolute path, so install it directly
        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install {}".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
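
# The virt_cmds list consumed above is a sequence of single-key dicts, each
# mapping a virt-customize operation constant to its argument, e.g.
# (illustrative values):
#
#   [{con.VIRT_RUN_CMD: 'yum -y remove opendaylight'},
#    {con.VIRT_PW: 'password:secret'}]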


def make_ssh_key():
    """
    Generates public and private SSH keys using 1024-bit RSA
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
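
# Usage sketch: both values are returned as text ready to embed in YAML or
# an authorized_keys file.
#
#   private_key, public_key = make_ssh_key()
#   # private_key starts with '-----BEGIN PRIVATE KEY-----' (PKCS8 PEM)
#   # public_key is a single 'ssh-rsa AAAA...' line (OpenSSH format)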


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            # use a distinct loop variable so the outer 'line' is not
            # clobbered before the checks further down
            for pk_line in private_key.splitlines():
                key_out += "      {}\n".format(pk_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
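
# Example of the ComputeKernelArgs rewrite above, assuming hypothetical
# performance settings {'hugepagesz': '2M', 'hugepages': 2048} for the
# Compute kernel section; note the trailing space left by the join:
#
#   ComputeKernelArgs: 'hugepagesz=2M hugepages=2048 '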


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
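
# Layout sketch of the generated key ('<hiih' is little-endian): a 2-byte
# type field (1), a 4-byte creation time in epoch seconds, a 4-byte
# nanoseconds field (0 here) and a 2-byte secret length, followed by the
# 16 random secret bytes; base64-encoding the whole blob yields a
# cephx-style key such as b'AQ...' (illustrative).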


def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds: {{{}: {{}}}}\n".format(
                ds_opts['ceph_device']
            ))
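
# With a hypothetical ds_opts['ceph_device'] of '/dev/sdb', the appended
# block reads:
#
#   ExtraConfig:
#     ceph::profile::params::osds: {/dev/sdb: {}}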


def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network datacentre".format(ext_type))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
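
# Illustrative output for a native-VLAN external network with example
# values (cidr 192.0.2.0/24, gateway 192.0.2.1, floating range
# 192.0.2.50-192.0.2.99):
#
#   ['openstack network create external --project service --external '
#    '--provider-network-type flat --provider-physical-network datacentre',
#    'openstack subnet create external-subnet --project service --network '
#    'external --no-dhcp --gateway 192.0.2.1 --allocation-pool '
#    'start=192.0.2.50,end=192.0.2.99 --subnet-range 192.0.2.0/24']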


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
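

# Sketch of one resulting datasource command for the 'nova' driver
# (credential values are placeholders pulled from overcloudrc):
#
#   nova "nova" --config username=admin --config tenant_name=admin
#     --config password=secret --config auth_url=http://192.0.2.1:5000/v2.0
#     --config api_version="2.34"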