Merge "Fixes stale undercloud delorean repos"
[apex.git] / apex / overcloud / deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import platform
15 import shutil
16 import uuid
17 import struct
18 import time
19
20 from apex.common import constants as con
21 from apex.common.exceptions import ApexDeployException
22 from apex.common import parsers
23 from apex.virtual import utils as virt_utils
24 from cryptography.hazmat.primitives import serialization as \
25     crypto_serialization
26 from cryptography.hazmat.primitives.asymmetric import rsa
27 from cryptography.hazmat.backends import default_backend as \
28     crypto_default_backend
29
30
31 SDN_FILE_MAP = {
32     'opendaylight': {
33         'sfc': 'neutron-sfc-opendaylight.yaml',
34         'vpn': 'neutron-bgpvpn-opendaylight.yaml',
35         'gluon': 'gluon.yaml',
36         'vpp': {
37             'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
38             'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
39             'default': 'neutron-opendaylight-honeycomb.yaml'
40         },
41         'l2gw': 'neutron-l2gw-opendaylight.yaml',
42         'default': 'neutron-opendaylight.yaml',
43     },
44     'onos': {
45         'sfc': 'neutron-onos-sfc.yaml',
46         'default': 'neutron-onos.yaml'
47     },
48     'ovn': 'neutron-ml2-ovn.yaml',
49     False: {
50         'vpp': 'neutron-ml2-vpp.yaml',
51         'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
52     }
53 }
54
55 OTHER_FILE_MAP = {
56     'tacker': 'enable_tacker.yaml',
57     'congress': 'enable_congress.yaml',
58     'barometer': 'enable_barometer.yaml',
59     'rt_kvm': 'enable_rt_kvm.yaml'
60 }
61
62 OVS_PERF_MAP = {
63     'HostCpusList': 'dpdk_cores',
64     'NeutronDpdkCoreList': 'pmd_cores',
65     'NeutronDpdkSocketMemory': 'socket_memory',
66     'NeutronDpdkMemoryChannels': 'memory_channels'
67 }
68
69 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
70 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
71 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
72                       ".noarch.rpm"
73
74
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the sdn controller is
    matched and then the function looks for enabled features for that
    controller to determine which environment files should be used.  By
    default the feature will be checked if set to true in deploy settings to be
    added to the list.  If a feature does not have a boolean value, then the
    key and value pair to compare with are checked as a tuple (k,v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        # check if the value is not a boolean
        elif isinstance(v, tuple):
            # (setting_value, env_file): only include the env file when the
            # deploy setting matches the expected value.  Use get() so a
            # missing setting means "no match" rather than a KeyError.
            if ds.get(k) == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    # fall back to the map's default env file when nothing matched
    if not env_list:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
118
119
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):
    """
    Builds the overcloud deploy command and writes it to tmp_dir.

    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: dir to write the 'deploy_command' file into
    :param virtual: True for a virtual deployment
    :param env_file: optional extra environment file to include
    :param net_data: whether to pass a network_data.yaml file
    :return: the deploy command string
    :raises ApexDeployException: if control or compute node count is 0
    """
    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    if env_file:
        deploy_options.append(env_file)
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    # feature env files (tacker, congress, barometer, rt_kvm)
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control, num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        # non-HA deployments only ever use a single controller
        num_control = 1
    # Use a local copy of the timeout: mutating con.DEPLOY_TIMEOUT in place
    # would double it again on every subsequent call to this function.
    deploy_timeout = con.DEPLOY_TIMEOUT
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS
        # Either way it will probably get better over time  as the aarch
        # support matures in CentOS and deploy time should be tested in
        # the future so this multiplier can be removed.
        deploy_timeout *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(deploy_timeout)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        # fall back to qemu emulation when nested KVM is not enabled on host
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
187
188
def prep_image(ds, ns, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    # list of single-entry {operation: argument} dicts consumed by
    # virt_utils.virt_customize at the end of this function
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    # propagate undercloud proxy settings into the overcloud image
    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    # BGPVPN: arrange for the Quagga zrpcd daemon to start at boot
    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        # write a modules script for each driver locally, then upload it into
        # the image and mark it executable
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
                # NOTE(review): fh.close() is redundant inside the 'with'
                # block -- the context manager closes the file
                fh.close()

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    # SFC with plain OVS requires the NSH-enabled openvswitch builds
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            # no-SDN FDIO scenario uses a dedicated VPP package set
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    if sdn == 'opendaylight':
        # non-default ODL version: replace the packaged ODL and its puppet
        # module with the requested version shipped under /root
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        # NOTE(review): the 'sdn == opendaylight' test below is redundant
        # (already guaranteed by the enclosing if); this branch only runs
        # when odl_version equals the default -- confirm that is intended
        elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
                and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    # OVN requires the newer openvswitch 2.8 packages staged under /root
    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    # work on a copy of the image so the source image stays pristine
    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
339
340
def make_ssh_key():
    """
    Creates public and private ssh keys with 2048 bit RSA encryption
    :return: private, public key as utf-8 strings (PEM/PKCS8 private key,
             OpenSSH-format public key)
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        # 1024 bit RSA keys are below the accepted modern minimum;
        # 2048 is the smallest size currently considered safe
        key_size=2048
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
361
362
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file (currently unused
        by this function)
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    # work on a copy of the opnfv env file inside tmp_dir
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            # use a dedicated loop variable: reusing 'line' here would
            # clobber the outer fileinput loop variable and make the
            # independent checks later in this iteration inspect a key
            # line instead of the current file line
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        # SDN/dataplane specific substitutions
        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            # DVR: move the DHCP agent from controllers to computes
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            # per-role VPP performance options
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            # OVS DPDK performance options
            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        # fileinput with inplace=True redirects stdout into the file
        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
525
526
def generate_ceph_key():
    """
    Generates a random cephx secret key.

    The key is a 16-byte random secret prefixed with the binary cephx
    header (version 1, creation timestamp, and secret length), then
    base64 encoded.
    :return: base64-encoded key as bytes
    """
    secret = os.urandom(16)
    created = int(time.time())
    header = struct.pack('<hiih', 1, created, 0, len(secret))
    return base64.b64encode(header + secret)
531
532
def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir expected to hold storage-environment.yaml
    :return: None
    :raises ApexDeployException: if storage-environment.yaml is not present
        in tmp_dir
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    # rewrite the file in place, replacing the placeholder ceph FSID and
    # keys with freshly generated values (print goes into the file because
    # fileinput inplace=True redirects stdout)
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)
    # when a dedicated OSD device is configured, append an ExtraConfig
    # mapping for it
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            # NOTE(review): this renders as "osds:{<dev>:{}}" with no space
            # after the first colon -- verify the resulting YAML parses as
            # intended by the consumer
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
570
571
def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    external = 'external' in ns.enabled_network_list
    if external:
        # use the first external network and its floating IP range
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        # no external network defined: fall back to the admin network and
        # its introspection range
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    # flat provider network unless the compute nic sits on a tagged vlan
    vlan = nic_config['compute']['vlan']
    if vlan == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(vlan)
    cmds = [
        # create network command
        "openstack network create external --project service "
        "--external --provider-network-type {} "
        "--provider-physical-network datacentre".format(ext_type)
    ]
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
612
613
def create_congress_cmds(overcloud_file):
    """
    Generates congress datasource creation commands.
    :param overcloud_file: path to the overcloudrc credentials file
    :return: list of congress datasource command strings
    :raises KeyError: when required credentials are missing from overcloudrc
    """
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        cfg_parts = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    ds_cfg = '--config ' + ' --config '.join(cfg_parts)

    cmds = []
    for driver in ('nova', 'neutronv2', 'cinder', 'glancev2', 'keystone',
                   'doctor'):
        if driver == 'doctor':
            # doctor datasource takes no credential config
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            # nova driver needs the compute API microversion pinned
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds