##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import shutil
import uuid
import struct
import time

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


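# Maps sdn_controller values (False = no SDN controller) to
# tripleo-heat-templates environment files.  Nested dicts are keyed by
# additional deploy options with a 'default' fallback, and a tuple pairs a
# required option value with its environment file.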
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

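# Optional services and the environment files that enable them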
OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

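# OVS DPDK heat template parameters mapped to their deploy settings
# performance keys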
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"


def build_sdn_env_list(ds, sdn_map, env_list=None):
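    """
    Builds a list of SDN environment files to include in the deploy command
    :param ds: deploy options from deploy settings
    :param sdn_map: map of SDN options to environment files
    :param env_list: initial list of files (used when recursing nested maps)
    :return: list of environment file paths
    """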
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
            if ds[k] == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):
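    """
    Creates the overcloud deploy command and writes it to tmp_dir
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: dir to write the deploy_command file to
    :param virtual: True if deployment is virtual
    :param env_file: opnfv environment file to include
    :param net_data: True if network_data.yaml should be passed to the deploy
    :return: deploy command as a string
    """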

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    if env_file:
        deploy_options.append(env_file)
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
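    # fall back to qemu emulation when the host lacks nested KVM support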
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates SDN image and preps for deployment.
    :param ds: deploy settings
    :param img: SDN image
    :param tmp_dir: dir to store modified SDN image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Disabling neutron openvswitch-agent")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start at boot")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic Linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")


def make_ssh_key():
    """
    Creates a 1024-bit RSA SSH key pair
    :return: private key, public key (PEM and OpenSSH encoded strings)
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Shorter variables for the performance options used in the file edits
    # below
    if 'performance' in ds_opts:
        perf = True
        # vpp
        perf_vpp_comp = ds_opts['performance']['Compute'].get('vpp')
        perf_vpp_ctrl = ds_opts['performance']['Controller'].get('vpp')

        # ovs
        perf_ovs_comp = ds_opts['performance']['Compute'].get('ovs')

        # kernel
        perf_kern_comp = ds_opts['performance']['Compute'].get('kernel')
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and output yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


def generate_ceph_key():
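    """
    Generates a random Ceph key: a packed header (version 1, creation time,
    zeroed nanoseconds, key length) followed by 16 random bytes
    :return: Ceph key as base64-encoded bytes
    """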
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds: {{{}: {{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(
            nic_config['compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network datacentre".format(ext_type))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
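    """
    Creates congress datasource commands using credentials parsed from an
    overcloudrc file
    :param overcloud_file: path to overcloudrc file
    :return: list of congress datasource commands
    """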
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
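    # render each credential entry as a repeated --config argument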
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds