1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
20 from apex.common import constants as con
21 from apex.common.exceptions import ApexDeployException
22 from apex.common import parsers
23 from apex.virtual import utils as virt_utils
24 from cryptography.hazmat.primitives import serialization as \
26 from cryptography.hazmat.primitives.asymmetric import rsa
27 from cryptography.hazmat.backends import default_backend as \
28 crypto_default_backend
33 'sfc': 'neutron-sfc-opendaylight.yaml',
34 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
35 'gluon': 'gluon.yaml',
37 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
38 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
39 'default': 'neutron-opendaylight-honeycomb.yaml'
41 'l2gw': 'neutron-l2gw-opendaylight.yaml',
42 'sriov': 'neutron-opendaylight-sriov.yaml',
43 'default': 'neutron-opendaylight.yaml',
46 'sfc': 'neutron-onos-sfc.yaml',
47 'default': 'neutron-onos.yaml'
49 'ovn': 'neutron-ml2-ovn.yaml',
51 'vpp': 'neutron-ml2-vpp.yaml',
52 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
57 'tacker': 'enable_tacker.yaml',
58 'congress': 'enable_congress.yaml',
59 'barometer': 'enable_barometer.yaml',
60 'rt_kvm': 'enable_rt_kvm.yaml'
64 'HostCpusList': 'dpdk_cores',
65 'NeutronDpdkCoreList': 'pmd_cores',
66 'NeutronDpdkSocketMemory': 'socket_memory',
67 'NeutronDpdkMemoryChannels': 'memory_channels'
70 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
71 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
72 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
76 def build_sdn_env_list(ds, sdn_map, env_list=None):
78 Builds a list of SDN environment files to be used in the deploy cmd.
80 This function recursively searches an sdn_map. First the sdn controller is
81 matched and then the function looks for enabled features for that
82 controller to determine which environment files should be used. By
83 default the feature will be checked if set to true in deploy settings to be
84 added to the list. If a feature does not have a boolean value, then the
85 key and value pair to compare with are checked as a tuple (k,v).
87 :param ds: deploy settings
88 :param sdn_map: SDN map to recursively search
89 :param env_list: recursive var to hold previously found env_list
90 :return: A list of env files
94 for k, v in sdn_map.items():
95 if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
96 if isinstance(v, dict):
97 # Append default SDN env file first
98 # The assumption is that feature-enabled SDN env files
99 # override and do not conflict with previously set default
101 if ds['sdn_controller'] == k and 'default' in v:
102 env_list.append(os.path.join(con.THT_ENV_DIR,
104 env_list.extend(build_sdn_env_list(ds, v))
106 env_list.append(os.path.join(con.THT_ENV_DIR, v))
107 # check if the value is not a boolean
108 elif isinstance(v, tuple):
110 env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
111 if len(env_list) == 0:
113 env_list.append(os.path.join(
114 con.THT_ENV_DIR, sdn_map['default']))
116 logging.warning("Unable to find default file for SDN")
121 def create_deploy_cmd(ds, ns, inv, tmp_dir,
122 virtual, env_file='opnfv-environment.yaml',
125 logging.info("Creating deployment command")
126 deploy_options = ['network-environment.yaml']
129 deploy_options.append(env_file)
130 ds_opts = ds['deploy_options']
131 deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
133 for k, v in OTHER_FILE_MAP.items():
134 if k in ds_opts and ds_opts[k]:
135 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
138 prep_storage_env(ds, tmp_dir)
139 deploy_options.append(os.path.join(con.THT_ENV_DIR,
140 'storage-environment.yaml'))
142 prep_sriov_env(ds, tmp_dir)
144 if ds['global_params']['ha_enabled']:
145 deploy_options.append(os.path.join(con.THT_ENV_DIR,
146 'puppet-pacemaker.yaml'))
149 deploy_options.append('virtual-environment.yaml')
151 deploy_options.append('baremetal-environment.yaml')
153 num_control, num_compute = inv.get_node_counts()
154 if num_control == 0 or num_compute == 0:
155 logging.error("Detected 0 control or compute nodes. Control nodes: "
156 "{}, compute nodes{}".format(num_control, num_compute))
157 raise ApexDeployException("Invalid number of control or computes")
158 elif num_control > 1 and not ds['global_params']['ha_enabled']:
160 if platform.machine() == 'aarch64':
161 # aarch64 deploys were not completing in the default 90 mins.
162 # Not sure if this is related to the hardware the OOO support
163 # was developed on or the virtualization support in CentOS
164 # Either way it will probably get better over time as the aarch
165 # support matures in CentOS and deploy time should be tested in
166 # the future so this multiplier can be removed.
167 con.DEPLOY_TIMEOUT *= 2
168 cmd = "openstack overcloud deploy --templates --timeout {} " \
169 .format(con.DEPLOY_TIMEOUT)
171 for option in deploy_options:
172 cmd += " -e {}".format(option)
173 cmd += " --ntp-server {}".format(ns['ntp'][0])
174 cmd += " --control-scale {}".format(num_control)
175 cmd += " --compute-scale {}".format(num_compute)
176 cmd += ' --control-flavor control --compute-flavor compute'
178 cmd += ' --networks-file network_data.yaml'
181 with open('/sys/module/kvm_intel/parameters/nested') as f:
182 nested_kvm = f.read().strip()
183 if nested_kvm != 'Y':
184 libvirt_type = 'qemu'
185 cmd += ' --libvirt-type {}'.format(libvirt_type)
186 logging.info("Deploy command set: {}".format(cmd))
188 with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
193 def prep_image(ds, ns, img, tmp_dir, root_pw=None):
195 Locates sdn image and preps for deployment.
196 :param ds: deploy settings
197 :param ns: network settings
198 :param img: sdn image
199 :param tmp_dir: dir to store modified sdn image
200 :param root_pw: password to configure for overcloud image
203 # TODO(trozet): Come up with a better way to organize this logic in this
205 logging.info("Preparing image: {} for deployment".format(img))
206 if not os.path.isfile(img):
207 logging.error("Missing SDN image {}".format(img))
208 raise ApexDeployException("Missing SDN image file: {}".format(img))
210 ds_opts = ds['deploy_options']
212 sdn = ds_opts['sdn_controller']
213 # we need this due to rhbz #1436021
214 # fixed in systemd-219-37.el7
216 logging.info("Neutron openvswitch-agent disabled")
219 "rm -f /etc/systemd/system/multi-user.target.wants/"
220 "neutron-openvswitch-agent.service"},
223 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
227 if ns.get('http_proxy', ''):
230 "echo 'http_proxy={}' >> /etc/environment".format(
233 if ns.get('https_proxy', ''):
236 "echo 'https_proxy={}' >> /etc/environment".format(
240 virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
243 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
244 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
246 con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
249 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
250 "init.d/zrpcd_start.sh' /etc/rc.local "})
252 con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
253 "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
254 logging.info("ZRPCD process started")
256 dataplane = ds_opts['dataplane']
257 if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
258 logging.info("Enabling kernel modules for dpdk")
259 # file to module mapping
261 os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
262 os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
264 for mod_file, mod in uio_types.items():
265 with open(mod_file, 'w') as fh:
266 fh.write('#!/bin/bash\n')
267 fh.write('exec /sbin/modprobe {}'.format(mod))
271 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
273 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
274 "{}".format(os.path.basename(mod_file))}
277 pw_op = "password:{}".format(root_pw)
278 virt_cmds.append({con.VIRT_PW: pw_op})
279 if ds_opts['sfc'] and dataplane == 'ovs':
281 {con.VIRT_RUN_CMD: "yum -y install "
282 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
283 "{}".format(OVS_NSH_KMOD_RPM)},
284 {con.VIRT_RUN_CMD: "yum downgrade -y "
285 "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
286 "{}".format(OVS_NSH_RPM)}
288 if dataplane == 'fdio':
289 # Patch neutron with using OVS external interface for router
290 # and add generic linux NS interface driver
292 {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
293 "-p1 < neutron-patch-NSDriver.patch"})
296 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
297 {con.VIRT_RUN_CMD: "yum install -y "
298 "/root/nosdn_vpp_rpms/*.rpm"}
301 if sdn == 'opendaylight':
302 if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
304 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
305 {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
306 {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
307 "/root/puppet-opendaylight-"
308 "{}.tar.gz".format(ds_opts['odl_version'])}
310 if ds_opts['odl_version'] == 'master':
312 {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
313 ds_opts['odl_version'])}
317 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
318 ds_opts['odl_version'])}
321 elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
322 and ds_opts['odl_vpp_netvirt']:
324 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
325 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
326 ODL_NETVIRT_VPP_RPM)}
331 {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
333 {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
337 tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
338 shutil.copyfile(img, tmp_oc_image)
339 logging.debug("Temporary overcloud image stored as: {}".format(
341 virt_utils.virt_customize(virt_cmds, tmp_oc_image)
342 logging.info("Overcloud image customization complete")
347 Creates public and private ssh keys with 1024 bit RSA encryption
348 :return: private, public key
350 key = rsa.generate_private_key(
351 backend=crypto_default_backend(),
352 public_exponent=65537,
356 private_key = key.private_bytes(
357 crypto_serialization.Encoding.PEM,
358 crypto_serialization.PrivateFormat.PKCS8,
359 crypto_serialization.NoEncryption())
360 public_key = key.public_key().public_bytes(
361 crypto_serialization.Encoding.OpenSSH,
362 crypto_serialization.PublicFormat.OpenSSH
364 return private_key.decode('utf-8'), public_key.decode('utf-8')
367 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
369 Creates modified opnfv/network environments for deployment
370 :param ds: deploy settings
371 :param ns: network settings
372 :param inv: node inventory
373 :param opnfv_env: file path for opnfv-environment file
374 :param net_env: file path for network-environment file
375 :param tmp_dir: Apex tmp dir
379 logging.info("Preparing opnfv-environment and network-environment files")
380 ds_opts = ds['deploy_options']
381 tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
382 shutil.copyfile(opnfv_env, tmp_opnfv_env)
383 tenant_nic_map = ns['networks']['tenant']['nic_mapping']
385 tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
386 tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
387 external_nic_map = ns['networks']['external'][0]['nic_mapping']
388 external_nic = dict()
389 external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
392 private_key, public_key = make_ssh_key()
394 # Make easier/faster variables to index in the file editor
395 if 'performance' in ds_opts:
398 if 'vpp' in ds_opts['performance']['Compute']:
399 perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
402 if 'vpp' in ds_opts['performance']['Controller']:
403 perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
408 if 'ovs' in ds_opts['performance']['Compute']:
409 perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
414 if 'kernel' in ds_opts['performance']['Compute']:
415 perf_kern_comp = ds_opts['performance']['Compute']['kernel']
417 perf_kern_comp = None
421 # Modify OPNFV environment
422 # TODO: Change to build a dict and outputting yaml rather than parsing
423 for line in fileinput.input(tmp_opnfv_env, inplace=True):
424 line = line.strip('\n')
426 if 'CloudDomain' in line:
427 output_line = " CloudDomain: {}".format(ns['domain_name'])
428 elif 'replace_private_key' in line:
429 output_line = " private_key: |\n"
431 for line in private_key.splitlines():
432 key_out += " {}\n".format(line)
433 output_line += key_out
434 elif 'replace_public_key' in line:
435 output_line = " public_key: '{}'".format(public_key)
436 elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
437 'resource_registry' in line:
438 output_line = "resource_registry:\n" \
439 " OS::TripleO::NodeUserData: first-boot.yaml"
440 elif 'ComputeExtraConfigPre' in line and \
441 ds_opts['dataplane'] == 'ovs_dpdk':
442 output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
443 './ovs-dpdk-preconfig.yaml'
445 if ds_opts['sdn_controller'] == 'opendaylight' and \
446 'odl_vpp_routing_node' in ds_opts:
447 if 'opendaylight::vpp_routing_node' in line:
448 output_line = (" opendaylight::vpp_routing_node: {}.{}"
449 .format(ds_opts['odl_vpp_routing_node'],
451 elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
452 if 'NeutronVPPAgentPhysnets' in line:
453 output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
454 format(tenant_nic['Controller']))
455 elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
457 if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
459 elif 'NeutronDhcpAgentsPerNetwork' in line:
460 num_control, num_compute = inv.get_node_counts()
461 output_line = (" NeutronDhcpAgentsPerNetwork: {}"
462 .format(num_compute))
463 elif 'ComputeServices' in line:
464 output_line = (" ComputeServices:\n"
465 " - OS::TripleO::Services::NeutronDhcpAgent")
466 # SRIOV networks are VLAN based provider networks. In order to simplify
467 # the deployment, nfv_sriov will be the default physnet. VLANs are not
468 # needed in advance, and the user will have to create the network
469 # specifying the segmentation-id.
471 if 'NeutronNetworkVLANRanges' in line:
472 output_line = ("{},nfv_sriov'".format(line[:-1]))
475 for role in 'NovaCompute', 'Controller':
476 if role == 'NovaCompute':
477 perf_opts = perf_vpp_comp
479 perf_opts = perf_vpp_ctrl
480 cfg = "{}ExtraConfig".format(role)
481 if cfg in line and perf_opts:
483 if 'main-core' in perf_opts:
484 perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
485 .format(perf_opts['main-core']))
486 if 'corelist-workers' in perf_opts:
488 "fdio::vpp_cpu_corelist_workers: '{}'"
489 .format(perf_opts['corelist-workers']))
490 if ds_opts['sdn_controller'] == 'opendaylight' and \
491 ds_opts['dataplane'] == 'fdio':
492 if role == 'NovaCompute':
494 "tripleo::profile::base::neutron::"
495 "agents::honeycomb::"
496 "interface_role_mapping:"
497 " ['{}:tenant-interface',"
498 "'{}:public-interface']"
499 .format(tenant_nic[role],
503 "tripleo::profile::base::neutron::"
504 "agents::honeycomb::"
505 "interface_role_mapping:"
506 " ['{}:tenant-interface']"
507 .format(tenant_nic[role]))
509 output_line = (" {}:{}".format(cfg, perf_line))
511 if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
512 for k, v in OVS_PERF_MAP.items():
513 if k in line and v in perf_ovs_comp:
514 output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
517 # (FIXME) use compute's kernel settings for all nodes for now.
519 if 'NovaSchedulerDefaultFilters' in line:
521 " NovaSchedulerDefaultFilters: 'RamFilter," \
522 "ComputeFilter,AvailabilityZoneFilter," \
523 "ComputeCapabilitiesFilter," \
524 "ImagePropertiesFilter,NUMATopologyFilter'"
525 elif 'ComputeKernelArgs' in line:
527 for k, v in perf_kern_comp.items():
528 kernel_args += "{}={} ".format(k, v)
530 output_line = " ComputeKernelArgs: '{}'".\
535 logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
538 def generate_ceph_key():
540 header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
541 return base64.b64encode(header + key)
544 def prep_storage_env(ds, tmp_dir):
546 Creates storage environment file for deployment. Source file is copied by
547 undercloud playbook to host.
552 ds_opts = ds['deploy_options']
553 storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
554 if not os.path.isfile(storage_file):
555 logging.error("storage-environment file is not in tmp directory: {}. "
556 "Check if file was copied from "
557 "undercloud".format(tmp_dir))
558 raise ApexDeployException("storage-environment file not copied from "
560 for line in fileinput.input(storage_file, inplace=True):
561 line = line.strip('\n')
562 if 'CephClusterFSID' in line:
563 print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
564 elif 'CephMonKey' in line:
565 print(" CephMonKey: {}".format(generate_ceph_key().decode(
567 elif 'CephAdminKey' in line:
568 print(" CephAdminKey: {}".format(generate_ceph_key().decode(
570 elif 'CephClientKey' in line:
571 print(" CephClientKey: {}".format(generate_ceph_key().decode(
575 if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
576 with open(storage_file, 'a') as fh:
577 fh.write(' ExtraConfig:\n')
578 fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
579 ds_opts['ceph_device']
583 def prep_sriov_env(ds, tmp_dir):
585 Creates SRIOV environment file for deployment. Source file is copied by
586 undercloud playbook to host.
591 ds_opts = ds['deploy_options']
592 sriov_iface = ds_opts['sriov']
593 sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
594 if not os.path.isfile(sriov_file):
595 logging.error("sriov-environment file is not in tmp directory: {}. "
596 "Check if file was copied from "
597 "undercloud".format(tmp_dir))
598 raise ApexDeployException("sriov-environment file not copied from "
600 # TODO(rnoriega): Instead of line editing, refactor this code to load
601 # yaml file into a dict, edit it and write the file back.
602 for line in fileinput.input(sriov_file, inplace=True):
603 line = line.strip('\n')
604 if 'NovaSchedulerDefaultFilters' in line:
605 print(" {}".format(line[3:]))
606 elif 'NovaSchedulerAvailableFilters' in line:
607 print(" {}".format(line[3:]))
608 elif 'NeutronPhysicalDevMappings' in line:
609 print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
610 .format(sriov_iface))
611 elif 'NeutronSriovNumVFs' in line:
612 print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
613 elif 'NovaPCIPassthrough' in line:
614 print(" NovaPCIPassthrough:")
615 elif 'devname' in line:
616 print(" - devname: \"{}\"".format(sriov_iface))
617 elif 'physical_network' in line:
618 print(" physical_network: \"nfv_sriov\"")
623 def external_network_cmds(ns):
625 Generates external network openstack commands
626 :param ns: network settings
627 :return: list of commands to configure external network
629 if 'external' in ns.enabled_network_list:
630 net_config = ns['networks']['external'][0]
632 pool_start, pool_end = net_config['floating_ip_range']
634 net_config = ns['networks']['admin']
636 pool_start, pool_end = ns['apex']['networks']['admin'][
637 'introspection_range']
638 nic_config = net_config['nic_mapping']
639 gateway = net_config['gateway']
641 # create network command
642 if nic_config['compute']['vlan'] == 'native':
645 ext_type = "vlan --provider-segment {}".format(nic_config[
647 cmds.append("openstack network create external --project service "
648 "--external --provider-network-type {} "
649 "--provider-physical-network datacentre".format(ext_type))
650 # create subnet command
651 cidr = net_config['cidr']
652 subnet_cmd = "openstack subnet create external-subnet --project " \
653 "service --network external --no-dhcp --gateway {} " \
654 "--allocation-pool start={},end={} --subnet-range " \
655 "{}".format(gateway, pool_start, pool_end, str(cidr))
656 if external and cidr.version == 6:
657 subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
658 '--ipv6-address-mode slaac'
659 cmds.append(subnet_cmd)
660 logging.debug("Neutron external network commands determined "
661 "as: {}".format(cmds))
665 def create_congress_cmds(overcloud_file):
666 drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
667 overcloudrc = parsers.parse_overcloudrc(overcloud_file)
668 logging.info("Creating congress commands")
671 "username={}".format(overcloudrc['OS_USERNAME']),
672 "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
673 "password={}".format(overcloudrc['OS_PASSWORD']),
674 "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
677 logging.error("Unable to find all keys required for congress in "
678 "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
679 "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
680 "file: {}".format(overcloud_file))
683 ds_cfg = '--config ' + ' --config '.join(ds_cfg)
685 for driver in drivers:
686 if driver == 'doctor':
687 cmd = "{} \"{}\"".format(driver, driver)
689 cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
691 cmd += ' --config api_version="2.34"'
692 logging.debug("Congress command created: {}".format(cmd))