Fix releasenotes label and include paths for docs
[apex.git] / apex / overcloud / deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import shutil
15 import uuid
16 import struct
17 import time
18
19 from apex.common import constants as con
20 from apex.common.exceptions import ApexDeployException
21 from apex.common import parsers
22 from apex.virtual import utils as virt_utils
23 from cryptography.hazmat.primitives import serialization as \
24     crypto_serialization
25 from cryptography.hazmat.primitives.asymmetric import rsa
26 from cryptography.hazmat.backends import default_backend as \
27     crypto_default_backend
28
29
# Maps an SDN controller selection (and feature flags beneath it) to the
# tripleo-heat-templates environment file enabling that scenario.  Consumed
# by build_sdn_env_list():
#   - nested dicts are recursed into when the matching option is enabled
#   - tuple values pair an expected deploy-option value with its env file
#   - the False key collects scenarios with no SDN controller
#   - 'default' entries are used when no more specific key matched
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}
52
# Optional feature toggles mapped to the THT environment file enabling them
# (see create_deploy_cmd)
OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

# Maps THT OVS/DPDK parameter names to the corresponding keys of the
# performance->Compute->ovs section of deploy settings (see prep_env)
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

# NSH-enabled openvswitch packages installed into the overcloud image for
# SFC-on-OVS scenarios (see prep_image)
OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
# RPM glob used when the odl_vpp_netvirt option is enabled (see prep_image)
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"
71
72
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds the list of SDN THT environment files for the deploy command.
    :param ds: deploy options dict (the 'deploy_options' section of deploy
               settings)
    :param sdn_map: mapping of SDN/feature keys to THT env file names
                    (e.g. SDN_FILE_MAP); values may be nested dicts or
                    (option value, file) tuples
    :param env_list: accumulator used during recursion; a fresh list is
                     created when not supplied
    :return: list of paths to THT environment files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # nested feature map (e.g. 'vpp' under 'opendaylight'):
                # recurse to resolve the sub-feature file
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
            # tuple pairs a deploy option value with the file to use when it
            # matches, e.g. 'dataplane': ('ovs_dpdk', ...).  Use get() so a
            # missing option is treated as no match rather than a KeyError.
            if ds.get(k) == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
93
94
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml'):
    """
    Creates the 'openstack overcloud deploy' command and writes it to
    tmp_dir/deploy_command.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: Apex tmp dir; storage env is prepped here and the deploy
                    command file is written here
    :param virtual: True for a virtual deployment, False for baremetal
    :param env_file: base environment file included first in the command
    :return: deploy command string
    :raises ApexDeployException: when control or compute node count is 0
    """
    logging.info("Creating deployment command")
    deploy_options = [env_file, 'network-environment.yaml']
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        # fixed log message: separator between label and count was missing
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        # non-HA deployments only ever use a single controller
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
141
142
def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.

    Builds a list of virt-customize operations from the deploy settings,
    copies the image into tmp_dir and applies the operations to the copy.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    :raises ApexDeployException: if the source image file is missing
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    # accumulated virt-customize operations; applied once at the end
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        # arrange for zrpcd to be started on boot via rc.local
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo '/opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        # write a modprobe script per module and upload it into the image
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
                fh.close()

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        # SFC on plain OVS requires the NSH-enabled openvswitch build
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            # replace the default ODL install and its puppet module with the
            # requested version
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        # NOTE(review): this elif chains to the inner odl_version check
        # above, so it only fires when odl_version == DEFAULT_ODL_VERSION,
        # and the 'sdn == opendaylight' test is redundant inside the outer
        # if -- confirm whether it was meant to chain to the outer if
        elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
                and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    # customize a copy so the original overcloud image is left untouched
    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
277
278
def make_ssh_key():
    """
    Creates public and private ssh keys with 2048 bit RSA encryption
    :return: private, public key as utf-8 strings (PEM/PKCS8 private key,
             OpenSSH-format public key)
    """
    # 2048 bit is the accepted minimum RSA key size today; the previous
    # 1024 bit keys are considered too weak for new deployments
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=2048
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
299
300
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment

    Copies opnfv_env into tmp_dir and rewrites it line by line (cloud
    domain, SSH keys, SDN/performance settings), then rewrites net_env in
    place for dataplane/kernel-arg specific settings.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputing yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            # use a distinct loop variable here: reusing 'line' would
            # clobber the current file line and corrupt the remaining
            # pattern checks in this iteration
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            # DVR: drop the global DHCP agent service and run it on computes
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            # role-specific VPP/honeycomb ExtraConfig settings
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if 'ComputeKernelArgs' in line and perf_kern_comp:
                kernel_args = ''
                for k, v in perf_kern_comp.items():
                    kernel_args += "{}={} ".format(k, v)
                if kernel_args:
                    output_line = "  ComputeKernelArgs: '{}'".\
                        format(kernel_args)
            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))

    # Modify Network environment
    for line in fileinput.input(net_env, inplace=True):
        line = line.strip('\n')
        if 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            print('  OS::TripleO::ComputeExtraConfigPre: '
                  './ovs-dpdk-preconfig.yaml')
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            print("resource_registry:\n"
                  "  OS::TripleO::NodeUserData: first-boot.yaml")
        elif perf and perf_kern_comp and \
                'NovaSchedulerDefaultFilters' in line:
            print("  NovaSchedulerDefaultFilters: 'RamFilter,"
                  "ComputeFilter,AvailabilityZoneFilter,"
                  "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
                  "NUMATopologyFilter'")
        else:
            print(line)

    logging.info("network-environment file written to {}".format(net_env))
469
470
def generate_ceph_key():
    """
    Generates a base64 encoded Ceph authentication key.

    The binary layout is a little-endian header packing
    (1, current unix time in seconds, 0, secret length) followed by 16
    random bytes; the whole blob is base64 encoded for use as
    CephMonKey/CephAdminKey values (see prep_storage_env).
    :return: base64-encoded key as bytes
    """
    secret = os.urandom(16)
    hdr = struct.pack('<hiih', 1, int(time.time()), 0, len(secret))
    return base64.b64encode(hdr + secret)
475
476
def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.

    Rewrites storage-environment.yaml in place with a fresh cluster FSID and
    freshly generated mon/admin keys, then appends an osd device override
    when ceph_device is set in the deploy settings.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir holding storage-environment.yaml
    :return: None
    :raises ApexDeployException: when storage-environment.yaml is missing
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for raw_line in fileinput.input(storage_file, inplace=True):
        content = raw_line.strip('\n')
        if 'CephClusterFSID' in content:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in content:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in content:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(content)
    if ds_opts.get('ceph_device'):
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
511
512
def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    external = 'external' in ns.enabled_network_list
    if external:
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        # no external network enabled: fall back to the admin network and
        # its introspection range
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    # network create command: flat when the compute nic is untagged,
    # otherwise a vlan provider network with the configured segment
    compute_vlan = nic_config['compute']['vlan']
    if compute_vlan == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(compute_vlan)
    cmds = [
        "openstack network create external --project service "
        "--external --provider-network-type {} "
        "--provider-physical-network datacentre".format(ext_type)
    ]
    # subnet create command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
553
554
def create_congress_cmds(overcloud_file):
    """
    Generates congress datasource creation commands from overcloudrc creds
    :param overcloud_file: path to overcloudrc file to parse
    :return: list of congress datasource command strings
    :raises KeyError: when a required OS_* credential is missing from the
                      overcloudrc file
    """
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        creds = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        # log which file was incomplete, then re-raise the original error
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    config_args = '--config ' + ' --config '.join(creds)

    cmds = list()
    for driver in drivers:
        # the doctor datasource is created without credential config
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, config_args)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds