re-enable support for fdio dvr scenario
apex/overcloud/overcloud_deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import re
import shutil
import uuid
import struct
import time

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.virtual import virtual_utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}
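# Keys in SDN_FILE_MAP are matched against deploy settings by
# build_sdn_env_list below: a string value names a THT environment file,
# a nested dict recurses on further deploy options, and a tuple pairs a
# deploy setting value with the environment file to use when it matches.
# The False key covers scenarios with no SDN controller.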

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}
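# OVS_PERF_MAP maps TripleO heat template parameter names to the
# corresponding keys under the 'ovs' performance options in deploy
# settings (see the perf_ovs_comp handling in prep_env below).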

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"


def build_sdn_env_list(ds, sdn_map, env_list=None):
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
            if ds[k] == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
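
# Example (hypothetical deploy options): sdn_controller 'opendaylight' with
# 'sfc' set would yield [<THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]; if
# nothing matches, the map's 'default' entry is used as a fallback.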


def _get_node_counts(inventory):
    """
    Return numbers of controller and compute nodes in inventory

    :param inventory: node inventory data structure
    :return: number of controller and compute nodes in inventory
    """
    if not inventory:
        raise ApexDeployException("Empty inventory")

    nodes = inventory['nodes']
    num_control = 0
    num_compute = 0
    for node in nodes:
        if node['capabilities'] == 'profile:control':
            num_control += 1
        elif node['capabilities'] == 'profile:compute':
            num_compute += 1
        else:
            # TODO(trozet) do we want to allow capabilities to not exist?
            logging.error("Every node must include a 'capabilities' key "
                          "tagged with either 'profile:control' or "
                          "'profile:compute'")
            raise ApexDeployException("Node missing capabilities "
                                      "key: {}".format(node))
    return num_control, num_compute


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml'):

    logging.info("Creating deployment command")
    deploy_options = [env_file, 'network-environment.yaml']
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    # TODO(trozet): make sure rt kvm file is in tht dir
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = _get_node_counts(inv)
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
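        # non-HA deployments only use a single controller, so scale any
        # extra control nodes out of the deployment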
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
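
# A generated command takes roughly this shape (environment files, timeout
# and scales depend on the deploy/network settings):
#   openstack overcloud deploy --templates --timeout 90 --libvirt-type kvm \
#     -e opnfv-environment.yaml -e network-environment.yaml ... \
#     --ntp-server pool.ntp.org --control-scale 1 --compute-scale 2 \
#     --control-flavor control --compute-flavor compute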


def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"})
        logging.info("ZRPC and Quagga enabled")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
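        # RHEL/CentOS convention: executable *.modules scripts placed in
        # /etc/sysconfig/modules/ are run at boot to load kernel modules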
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for routers
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
            # ODL_NETVIRT_VPP_RPM is already an absolute file path, so it is
            # installed directly rather than via a /root/<dir>/* glob
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install {}".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    pub_key = re.sub(r'ssh-rsa\s*', '', public_key.decode('utf-8'))
    return private_key.decode('utf-8'), pub_key
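
# Note: the returned public key has its 'ssh-rsa ' prefix stripped; prep_env
# below substitutes the bare base64 body into the replace_public_key
# placeholder of the opnfv environment file.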


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_ctrl_nic = tenant_nic_map['controller']['members'][0]
    tenant_comp_nic = tenant_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and output yaml rather than parsing
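    # Note: with inplace=True, fileinput redirects stdout into the file
    # being edited, so each print() below rewrites the current line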
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "      key: '{}'".format(private_key)
        elif 'replace_public_key' in line:
            output_line = "      key: '{}'".format(public_key)

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
            elif 'ControllerExtraConfig' in line:
                output_line = ("  ControllerExtraConfig:\n    "
                               "tripleo::profile::base::neutron::agents::"
                               "honeycomb::interface_role_mapping:"
                               " ['{}:tenant-interface']"
                               .format(tenant_ctrl_nic))
            elif 'NovaComputeExtraConfig' in line:
                output_line = ("  NovaComputeExtraConfig:\n    "
                               "tripleo::profile::base::neutron::agents::"
                               "honeycomb::interface_role_mapping:"
                               " ['{}:tenant-interface']"
                               .format(tenant_comp_nic))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_ctrl_nic))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = _get_node_counts(inv)
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if 'ComputeKernelArgs' in line and perf_kern_comp:
                kernel_args = ''
                for k, v in perf_kern_comp.items():
                    kernel_args += "{}={} ".format(k, v)
                if kernel_args:
                    output_line = "  ComputeKernelArgs: '{}'".\
                        format(kernel_args)
            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))

    # Modify Network environment
    for line in fileinput.input(net_env, inplace=True):
        line = line.strip('\n')
        if 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            print('  OS::TripleO::ComputeExtraConfigPre: '
                  './ovs-dpdk-preconfig.yaml')
        elif perf and perf_kern_comp:
            if 'resource_registry' in line:
                print("resource_registry:\n"
                      "  OS::TripleO::NodeUserData: first-boot.yaml")
            elif 'NovaSchedulerDefaultFilters' in line:
                print("  NovaSchedulerDefaultFilters: 'RamFilter,"
                      "ComputeFilter,AvailabilityZoneFilter,"
                      "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
                      "NUMATopologyFilter'")
            else:
                print(line)
        else:
            print(line)

    logging.info("network-environment file written to {}".format(net_env))


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
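
# The packed header mirrors Ceph's binary secret layout: a little-endian
# 16-bit key type (1 = CephX), 32-bit creation time in seconds, 32-bit
# nanoseconds (zeroed here) and the 16-bit key length, followed by the 16
# random key bytes, all base64 encoded.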


def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds: {{{}: {{}}}}\n".format(
                ds_opts['ceph_device']
            ))
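
# With a hypothetical ceph_device of /dev/sdb the appended block reads:
#   ExtraConfig:
#     ceph::profile::params::osds: {/dev/sdb: {}}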


def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network datacentre".format(ext_type))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
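
# Example output for a hypothetical flat external network on 172.30.9.0/24:
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service --network \
#     external --no-dhcp --gateway 172.30.9.1 \
#     --allocation-pool start=172.30.9.50,end=172.30.9.99 \
#     --subnet-range 172.30.9.0/24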


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
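
# Each entry pairs a congress datasource driver with its config flags, e.g.
# for the nova driver (hypothetical credentials):
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=secret --config auth_url=http://10.0.0.1:5000/v3 \
#     --config api_version="2.34"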