Merge "re-enable support for fdio dvr scenario"
[apex.git] / apex / overcloud / overcloud_deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import re
15 import shutil
16 import uuid
17 import struct
18 import time
19
20 from apex.common import constants as con
21 from apex.common.exceptions import ApexDeployException
22 from apex.common import parsers
23 from apex.virtual import utils as virt_utils
24 from cryptography.hazmat.primitives import serialization as \
25     crypto_serialization
26 from cryptography.hazmat.primitives.asymmetric import rsa
27 from cryptography.hazmat.backends import default_backend as \
28     crypto_default_backend
29
30
# Maps an SDN controller name (or False for no controller) plus its
# feature flags from deploy settings to the tripleo-heat-templates
# environment file enabling that feature.  Nested dicts are walked
# recursively by build_sdn_env_list(); a tuple value is matched against
# the deploy-setting value for its key as (setting_value, env_file).
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

# Boolean deploy options that simply toggle one extra THT environment file
OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

# Maps OVS-DPDK heat parameter names to their keys in the performance/ovs
# section of deploy settings (see prep_env)
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

# RPM file names installed into the overcloud image for OVS NSH/SFC support
OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
# Absolute path (inside the overcloud image) of the ODL netvirt-vpp RPM
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"
72
73
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds list of SDN environment files for "overcloud deploy".

    Walks sdn_map and collects the THT environment file for every entry
    whose key matches the configured SDN controller or is an enabled
    boolean deploy option.  Nested dict values are resolved recursively;
    tuple values are matched as (deploy-setting value, env file).  When
    nothing matches, the map's 'default' entry is used if present.

    :param ds: deploy options (the 'deploy_options' settings section)
    :param sdn_map: mapping of deploy options to environment files
    :param env_list: accumulator used by recursive calls
    :return: list of environment file paths
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # nested feature maps (e.g. 'vpp') are resolved recursively
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
            # tuple entries match on a deploy-setting value; get() treats a
            # missing setting as "no match" instead of raising KeyError
            if ds.get(k) == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
94
95
96 def _get_node_counts(inventory):
97     """
98     Return numbers of controller and compute nodes in inventory
99
100     :param inventory: node inventory data structure
101     :return: number of controller and compute nodes in inventory
102     """
103     if not inventory:
104         raise ApexDeployException("Empty inventory")
105
106     nodes = inventory['nodes']
107     num_control = 0
108     num_compute = 0
109     for node in nodes:
110         if node['capabilities'] == 'profile:control':
111             num_control += 1
112         elif node['capabilities'] == 'profile:compute':
113             num_compute += 1
114         else:
115             # TODO(trozet) do we want to allow capabilities to not exist?
116             logging.error("Every node must include a 'capabilities' key "
117                           "tagged with either 'profile:control' or "
118                           "'profile:compute'")
119             raise ApexDeployException("Node missing capabilities "
120                                       "key: {}".format(node))
121     return num_control, num_compute
122
123
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml'):
    """
    Assembles the "openstack overcloud deploy" command with all of its
    environment-file arguments, writes it to <tmp_dir>/deploy_command and
    returns it.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: dir to store the generated deploy_command file
    :param virtual: True for a virtual deployment, False for baremetal
    :param env_file: opnfv environment file to include
    :return: deploy command string
    :raises ApexDeployException: if inventory lacks control or compute nodes
    """

    logging.info("Creating deployment command")
    deploy_options = [env_file, 'network-environment.yaml']
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    # feature env files (tacker, congress, ...) toggled by deploy options
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = _get_node_counts(inv)
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        # non-HA deployments only ever use a single controller
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
170
171
def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    :raises ApexDeployException: if the source image file is missing
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"})
        logging.info("ZRPC and Quagga enabled")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            # the with statement closes the file; no explicit close needed
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            # non-default ODL version: swap in the requested puppet module
            # and packages staged under /root
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
            # ODL_NETVIRT_VPP_RPM is already an absolute rpm path; the old
            # "/root/{}/*" template produced a bogus "/root//root/....rpm/*"
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install {}".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
290
291
def make_ssh_key(key_size=1024):
    """
    Creates public and private ssh keys with RSA encryption
    :param key_size: RSA key size in bits; defaults to 1024 for backward
                     compatibility (callers may request 2048+ for stronger
                     keys)
    :return: private, public key as decoded strings
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=key_size
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    # raw string avoids the invalid "\s" escape warning in plain literals
    pub_key = re.sub(r'ssh-rsa\s*', '', public_key.decode('utf-8'))
    return private_key.decode('utf-8'), pub_key
313
314
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_ctrl_nic = tenant_nic_map['controller']['members'][0]
    tenant_comp_nic = tenant_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputing yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "      key: '{}'".format(private_key)
        elif 'replace_public_key' in line:
            output_line = "      key: '{}'".format(public_key)

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
            elif 'ControllerExtraConfig' in line:
                # closing quote belongs inside the brackets so the rendered
                # YAML list item is properly quoted
                output_line = ("  ControllerExtraConfig:\n    "
                               "tripleo::profile::base::neutron::agents::"
                               "honeycomb::interface_role_mapping:"
                               " ['{}:tenant-interface']"
                               .format(tenant_ctrl_nic))
            elif 'NovaComputeExtraConfig' in line:
                output_line = ("  NovaComputeExtraConfig:\n    "
                               "tripleo::profile::base::neutron::agents::"
                               "honeycomb::interface_role_mapping:"
                               " ['{}:tenant-interface']"
                               .format(tenant_comp_nic))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_ctrl_nic))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            # DVR: drop the controller DHCP agent and run it on computes
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = _get_node_counts(inv)
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if 'ComputeKernelArgs' in line and perf_kern_comp:
                kernel_args = ''
                for k, v in perf_kern_comp.items():
                    kernel_args += "{}={} ".format(k, v)
                if kernel_args:
                    output_line = "  ComputeKernelArgs: '{}'".\
                        format(kernel_args)
            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))

    # Modify Network environment
    for line in fileinput.input(net_env, inplace=True):
        line = line.strip('\n')
        if 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            print('  OS::TripleO::ComputeExtraConfigPre: '
                  './ovs-dpdk-preconfig.yaml')
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            print("resource_registry:\n"
                  "  OS::TripleO::NodeUserData: first-boot.yaml")
        elif perf and perf_kern_comp and \
                'NovaSchedulerDefaultFilters' in line:
            print("  NovaSchedulerDefaultFilters: 'RamFilter,"
                  "ComputeFilter,AvailabilityZoneFilter,"
                  "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
                  "NUMATopologyFilter'")
        else:
            print(line)

    logging.info("network-environment file written to {}".format(net_env))
469
470
def generate_ceph_key():
    """
    Generates a random Ceph secret key: a packed binary header (containing
    a version marker, the current time and the key length) followed by 16
    random bytes, base64-encoded.
    :return: base64-encoded key as bytes
    """
    secret = os.urandom(16)
    hdr = struct.pack('<hiih', 1, int(time.time()), 0, len(secret))
    return base64.b64encode(hdr + secret)
475
476
def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.

    Rewrites the Ceph FSID and keys in storage-environment.yaml in place
    with freshly generated values, and appends an OSD device mapping when
    one is configured in deploy options.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir holding the copied storage-environment.yaml
    :return: None
    :raises ApexDeployException: if storage-environment.yaml is not present
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    # inplace=True redirects stdout into the file, so each print() below
    # rewrites one line of storage-environment.yaml
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            # NOTE(review): this renders "osds:{<device>:{}}" with no space
            # after the first colon -- verify the consumer accepts this
            # flow-style YAML before changing it
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
511
512
def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    # pick the external network when enabled, otherwise fall back to the
    # admin network's introspection range
    if 'external' in ns.enabled_network_list:
        external = True
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        external = False
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    # network create command: flat when untagged, otherwise vlan-segmented
    vlan = nic_config['compute']['vlan']
    if vlan == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(vlan)
    cmds = [
        "openstack network create external --project service "
        "--external --provider-network-type {} "
        "--provider-physical-network datacentre".format(ext_type)
    ]
    # subnet create command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
553
554
def create_congress_cmds(overcloud_file):
    """
    Builds the congress datasource creation commands from overcloud
    credentials.
    :param overcloud_file: path to overcloudrc credentials file
    :return: list of congress datasource command strings
    :raises KeyError: when required credentials are missing from overcloudrc
    """
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    cmds = []
    for driver in drivers:
        # the doctor datasource takes no credential config
        if driver == 'doctor':
            command = '{0} "{0}"'.format(driver)
        else:
            command = '{0} "{0}" {1}'.format(driver, ds_cfg)
        if driver == 'nova':
            command += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(command))
        cmds.append(command)
    return cmds