Fix zrpcd start in the controller node
[apex.git] / apex / overcloud / deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import shutil
15 import uuid
16 import struct
17 import time
18
19 from apex.common import constants as con
20 from apex.common.exceptions import ApexDeployException
21 from apex.common import parsers
22 from apex.virtual import utils as virt_utils
23 from cryptography.hazmat.primitives import serialization as \
24     crypto_serialization
25 from cryptography.hazmat.primitives.asymmetric import rsa
26 from cryptography.hazmat.backends import default_backend as \
27     crypto_default_backend
28
29
# Maps the 'sdn_controller' deploy option (and feature flags such as 'sfc'
# or 'vpn') to the tripleo-heat-templates environment file(s) to include.
# Nested dicts are resolved recursively by build_sdn_env_list(); tuple
# values pair a specific deploy-option value with its environment file.
# The False key covers deployments with no SDN controller.
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

# Optional feature flags in deploy options and the environment file each
# one enables (consumed by create_deploy_cmd()).
OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

# Maps OVS-DPDK heat parameter names to the corresponding keys under the
# 'performance' -> 'Compute' -> 'ovs' deploy settings (used by prep_env()).
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

# RPM artifacts installed into the overcloud image for the SFC (NSH-enabled
# OVS) and ODL/VPP netvirt scenarios; paths/names are baked into the image.
OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"
72
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Recursively resolves the SDN environment files for a deployment.
    :param ds: deploy options dict (the 'deploy_options' section)
    :param sdn_map: mapping of option keys to THT env file names; nested
        dicts are recursed into, tuples pair an option value with a file
    :param env_list: accumulator used during recursion
    :return: list of environment file paths under con.THT_ENV_DIR
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # nested map: recurse to resolve the more specific option
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        # tuple entries map a specific option value to an env file; use
        # ds.get() so a key missing from deploy options cannot raise
        elif isinstance(v, tuple) and ds.get(k) == v[0]:
            env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        # nothing matched at this level: fall back to the map's default
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
93
94
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml'):
    """
    Builds the 'openstack overcloud deploy' command and persists it.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory (provides get_node_counts())
    :param tmp_dir: dir where the 'deploy_command' file is written
    :param virtual: True when deploying onto virtual nodes
    :param env_file: base environment file to include
    :return: the assembled deploy command string
    :raises ApexDeployException: when no control or compute nodes exist
    """
    logging.info("Creating deployment command")
    deploy_options = [env_file, 'network-environment.yaml']
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    # optional feature environment files (tacker, congress, ...)
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        # fixed message formatting: original read "compute nodes{}"
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        # without HA only a single controller may be deployed
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
141
142
def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    # virt_cmds collects virt-customize operations applied to a copy of the
    # image at the end of this function; order of the commands matters
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        # install a zrpcd start script and hook it into rc.local so the
        # quagga zrpcd daemon is started on boot (BGPVPN scenario)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        # appended to both rc.local paths; on this image one is a symlink
        # of the other -- presumably harmless duplication, verify on target
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            # write a modules-load script locally, then upload it into the
            # image and make it executable
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
                fh.close()

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        # SFC needs the NSH-enabled OVS build baked into the image
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            # no SDN controller: swap in the nosdn VPP package set
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            # replace the image's default ODL + puppet module with the
            # requested version's artifacts staged under /root
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                # master snapshots are unsigned; bypass dependency checks
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        # NOTE(review): this elif binds to the inner odl_version check
        # above, so it only fires when odl_version equals the default; the
        # repeated "sdn == 'opendaylight'" test is always true here.
        # Looks like it may have been intended as an elif of the outer
        # if -- confirm against deploy settings semantics before changing.
        elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
                and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        # OVN requires the newer OVS 2.8 packages staged in the image
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    # customize a copy of the image so the source image stays pristine
    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
280
281
def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    rsa_key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    pem_private = rsa_key.private_bytes(
        encoding=crypto_serialization.Encoding.PEM,
        format=crypto_serialization.PrivateFormat.PKCS8,
        encryption_algorithm=crypto_serialization.NoEncryption())
    ssh_public = rsa_key.public_key().public_bytes(
        encoding=crypto_serialization.Encoding.OpenSSH,
        format=crypto_serialization.PublicFormat.OpenSSH)
    # callers expect text, not bytes
    return pem_private.decode('utf-8'), ssh_public.decode('utf-8')
302
303
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputing yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            # use a dedicated loop variable: reusing 'line' here (as the
            # original code did) clobbers the current file line, so all of
            # the substring checks below this branch would run against the
            # last private-key line instead of the file content
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            # DVR scenario: DHCP agent moves from controllers to computes
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            # per-role VPP cpu pinning / honeycomb interface mapping
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if 'ComputeKernelArgs' in line and perf_kern_comp:
                kernel_args = ''
                for k, v in perf_kern_comp.items():
                    kernel_args += "{}={} ".format(k, v)
                if kernel_args:
                    output_line = "  ComputeKernelArgs: '{}'".\
                        format(kernel_args)
            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))

    # Modify Network environment
    for line in fileinput.input(net_env, inplace=True):
        line = line.strip('\n')
        if 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            print('  OS::TripleO::ComputeExtraConfigPre: '
                  './ovs-dpdk-preconfig.yaml')
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            print("resource_registry:\n"
                  "  OS::TripleO::NodeUserData: first-boot.yaml")
        elif perf and perf_kern_comp and \
                'NovaSchedulerDefaultFilters' in line:
            print("  NovaSchedulerDefaultFilters: 'RamFilter,"
                  "ComputeFilter,AvailabilityZoneFilter,"
                  "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
                  "NUMATopologyFilter'")
        else:
            print(line)

    logging.info("network-environment file written to {}".format(net_env))
472
473
def generate_ceph_key():
    """
    Generates a random secret key in the format the Ceph templates expect.

    The key is 16 random bytes preceded by a packed binary header holding
    a version field (1), the creation timestamp, a zero, and the key
    length; the whole blob is returned base64-encoded as bytes.
    """
    secret = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(secret))
    return base64.b64encode(header + secret)
478
479
def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: directory expected to contain storage-environment.yaml
    :return: None
    :raises ApexDeployException: when the source file is missing
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    # guard: the undercloud playbook must have copied the file here first
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    # rewrite the file in place, substituting fresh cluster credentials
    for raw_line in fileinput.input(storage_file, inplace=True):
        content = raw_line.strip('\n')
        if 'CephClusterFSID' in content:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in content:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in content:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(content)
    # optionally pin the OSD device chosen in deploy settings
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
514
515
def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    use_external = 'external' in ns.enabled_network_list
    if use_external:
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        # no external network: fall back to the admin net and carve the
        # floating pool out of the introspection range
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    # provider network type depends on whether the compute nic is tagged
    vlan = nic_config['compute']['vlan']
    if vlan == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(vlan)
    cmds = ["openstack network create external --project service "
            "--external --provider-network-type {} "
            "--provider-physical-network datacentre".format(ext_type)]
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if use_external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
556
557
def create_congress_cmds(overcloud_file):
    """
    Builds the congress datasource creation commands.
    :param overcloud_file: path to the overcloudrc credentials file
    :return: list of congress command strings
    :raises KeyError: when overcloudrc lacks a required OS_* variable
    """
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    creds = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        cred_opts = [
            "username={}".format(creds['OS_USERNAME']),
            "tenant_name={}".format(creds['OS_PROJECT_NAME']),
            "password={}".format(creds['OS_PASSWORD']),
            "auth_url={}".format(creds['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    cfg_args = '--config ' + ' --config '.join(cred_opts)

    for driver in drivers:
        # the doctor datasource takes no credential config arguments
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, cfg_args)
        if driver == 'nova':
            # nova datasource must pin its API microversion
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds