Merge "Adding SRIOV scenario"
[apex.git] / apex / overcloud / deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import platform
15 import shutil
16 import uuid
17 import struct
18 import time
19
20 from apex.common import constants as con
21 from apex.common.exceptions import ApexDeployException
22 from apex.common import parsers
23 from apex.virtual import utils as virt_utils
24 from cryptography.hazmat.primitives import serialization as \
25     crypto_serialization
26 from cryptography.hazmat.primitives.asymmetric import rsa
27 from cryptography.hazmat.backends import default_backend as \
28     crypto_default_backend
29
30
# Maps an SDN controller name (or False for no SDN controller) to tripleo
# heat template environment files.  Nested dicts are keyed by deploy-setting
# feature flags; a (value, env_file) tuple means the env file applies only
# when the deploy setting for that key equals the given value.  Consumed by
# build_sdn_env_list().
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

# Optional feature flag -> env file enabling that feature (see
# create_deploy_cmd).
OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

# THT parameter name -> key within the deploy settings 'ovs' performance
# options (applied in prep_env when dataplane is ovs_dpdk).
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

# Package names/paths staged inside the overcloud image; installed or
# downgraded by prep_image for NSH-capable OVS and the ODL netvirt VPP build.
OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"
74
75
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the sdn controller
    is matched and then the function looks for enabled features for that
    controller to determine which environment files should be used.  By
    default the feature will be checked if set to true in deploy settings to
    be added to the list.  If a feature does not have a boolean value, then
    the key and value pair to compare with are checked as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        # check if the value is not a boolean
        elif isinstance(v, tuple):
            # (expected value, env file): include the file only when the
            # deploy setting equals the expected value.  Use get() so a
            # missing key cannot raise KeyError while walking the map.
            if ds.get(k) == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if not env_list:
        # Nothing matched: fall back to the controller's default env file
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
119
120
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):
    """
    Builds the overcloud deploy command and persists it to tmp_dir.

    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: directory to write the 'deploy_command' file into
    :param virtual: True for a virtual (libvirt) deployment
    :param env_file: opnfv environment file to include, if any
    :param net_data: whether to pass a network_data.yaml networks file
    :return: the deploy command string
    :raises ApexDeployException: if 0 control or compute nodes are detected
    """
    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    if env_file:
        deploy_options.append(env_file)
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    # Enable optional features (tacker, congress, ...) that are switched on
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control, num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        # Without HA only a single controller is deployed
        num_control = 1

    # Use a local copy of the timeout rather than mutating the module-level
    # constant (the old "con.DEPLOY_TIMEOUT *= 2" compounded on every call)
    deploy_timeout = con.DEPLOY_TIMEOUT
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS
        # Either way it will probably get better over time  as the aarch
        # support matures in CentOS and deploy time should be tested in
        # the future so this multiplier can be removed.
        deploy_timeout *= 2

    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(deploy_timeout)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        # Fall back to qemu when nested KVM is not enabled on the host
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
191
192
def prep_image(ds, ns, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.

    Assembles a list of virt-customize operations driven by the deploy
    settings (SDN controller, dataplane, feature flags) and network settings
    (proxies), then applies them to a copy of the overcloud image placed in
    tmp_dir; the source image is left untouched.

    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    :raises ApexDeployException: if the source image file does not exist
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        # Any SDN controller replaces the neutron OVS agent, so strip its
        # service files out of the image
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    # Carry the undercloud proxy settings into the overcloud image
    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        # BGPVPN needs the quagga zrpcd daemon started at boot via rc.local
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            # Write a modprobe script locally, then upload it into the image
            # so the module is loaded at boot
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
                fh.close()

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        # SFC on plain OVS needs the NSH-capable OVS build staged in /root
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            # No-SDN fdio scenario uses the VPP rpms staged in the image
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            # Swap the default ODL package/puppet module for the requested
            # version shipped under /root
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        # NOTE(review): this elif is chained to the odl_version check above,
        # so it only fires when odl_version equals the default; the repeated
        # "sdn == 'opendaylight'" test here is redundant -- confirm this is
        # the intended condition.
        elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
                and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        # OVN requires the OVS 2.8 packages staged in /root/ovs28
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    # Customize a copy so the pristine source image is preserved
    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
343
344
def make_ssh_key():
    """
    Creates public and private ssh keys with 2048 bit RSA encryption

    A 1024 bit modulus was previously generated here, but RSA keys below
    2048 bits are considered weak and are below the currently recommended
    minimum key size, so 2048 is used instead.  The returned key pair is
    PEM/PKCS8 (private) and OpenSSH (public) encoded.

    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=2048
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
365
366
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment

    Copies the opnfv environment file into tmp_dir and rewrites it line by
    line: injects SSH keys and the cloud domain, applies SDN/dataplane
    specific parameters, and applies performance (VPP/OVS/kernel) options
    from the deploy settings.

    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file (not referenced
        in this function body)
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    # First member of each role's nic mapping is used for the tenant and
    # external interface substitutions below
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            # NOTE(review): this inner loop rebinds 'line', so for the
            # private-key iteration the checks further down see the last key
            # line instead of the original file line -- confirm that is
            # harmless for the templates in use.
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            # first-boot.yaml is required to apply kernel args / rt_kvm
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            # DVR: the DHCP agent is moved from controllers to computes
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")
        # SRIOV networks are VLAN based provider networks. In order to simplify
        # the deployment, nfv_sriov will be the default physnet. VLANs are not
        # needed in advance, and the user will have to create the network
        # specifying the segmentation-id.
        if ds_opts['sriov']:
            if 'NeutronNetworkVLANRanges' in line:
                output_line = ("{},nfv_sriov'".format(line[:-1]))

        if perf:
            # Build VPP/honeycomb ExtraConfig blocks per role
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        # Compute nodes also map the public interface into
                        # honeycomb; controllers only the tenant interface
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
536
537
def generate_ceph_key():
    """Generate a random cephx secret.

    Builds the binary key blob (a little-endian header packing type 1,
    creation time, zero nanoseconds and the key length, followed by 16
    random bytes) and returns it base64-encoded as ``bytes``.
    """
    secret = os.urandom(16)
    created = int(time.time())
    blob = struct.pack('<hiih', 1, created, 0, len(secret)) + secret
    return base64.b64encode(blob)
542
543
def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.

    Rewrites storage-environment.yaml in place, substituting a fresh cluster
    FSID and freshly generated cephx keys, then appends an osd ExtraConfig
    entry when a ceph device is configured in the deploy settings.

    :param ds: deploy settings
    :param tmp_dir: directory holding storage-environment.yaml
    :return: None
    :raises ApexDeployException: when the file is missing from tmp_dir
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    # Parameter name -> factory producing its fresh replacement value
    secret_makers = (
        ('CephClusterFSID', lambda: str(uuid.uuid4())),
        ('CephMonKey', lambda: generate_ceph_key().decode('utf-8')),
        ('CephAdminKey', lambda: generate_ceph_key().decode('utf-8')),
        ('CephClientKey', lambda: generate_ceph_key().decode('utf-8')),
    )
    for raw_line in fileinput.input(storage_file, inplace=True):
        text = raw_line.strip('\n')
        for param, make_secret in secret_makers:
            if param in text:
                print("  {}: {}".format(param, make_secret()))
                break
        else:
            print(text)
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
581
582
def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.

    Rewrites neutron-opendaylight-sriov.yaml in place, uncommenting the
    scheduler filter settings and pointing the nfv_sriov physnet at the
    interface configured in the deploy settings.

    :param ds: deploy settings
    :param tmp_dir: directory holding neutron-opendaylight-sriov.yaml
    :return: None
    :raises ApexDeployException: when the file is missing from tmp_dir
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")

    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    def _edited(text):
        # Uncomment the scheduler filter settings shipped in the template
        if 'NovaSchedulerDefaultFilters' in text or \
                'NovaSchedulerAvailableFilters' in text:
            return "  {}".format(text[3:])
        if 'NeutronPhysicalDevMappings' in text:
            return ("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                    .format(sriov_iface))
        if 'NeutronSriovNumVFs' in text:
            return "  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface)
        if 'NovaPCIPassthrough' in text:
            return "  NovaPCIPassthrough:"
        if 'devname' in text:
            return "    - devname: \"{}\"".format(sriov_iface)
        if 'physical_network' in text:
            return "      physical_network: \"nfv_sriov\""
        return text

    for raw_line in fileinput.input(sriov_file, inplace=True):
        print(_edited(raw_line.strip('\n')))
621
622
def external_network_cmds(ns):
    """
    Generates external network openstack commands

    Uses the external network settings when an external network is enabled,
    otherwise falls back to the admin network's introspection range.

    :param ns: network settings
    :return: list of commands to configure external network
    """
    use_external = 'external' in ns.enabled_network_list
    if use_external:
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    vlan = nic_config['compute']['vlan']
    if vlan == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(vlan)
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network datacentre".format(ext_type))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if use_external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
663
664
def create_congress_cmds(overcloud_file):
    """
    Builds the congress datasource creation commands.

    Reads overcloud credentials from the given overcloudrc file and
    produces one datasource argument string per supported driver.

    :param overcloud_file: path to the overcloudrc credentials file
    :return: list of congress command strings
    :raises KeyError: when a required OS_* credential is missing
    """
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        creds = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    config_args = '--config ' + ' --config '.join(creds)

    cmds = list()
    for driver in drivers:
        # The doctor driver takes no credential config; nova additionally
        # pins its API version
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, config_args)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds