Merge "Increasing loop device size for baremetal deployments"
[apex.git] / apex / overcloud / deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import platform
15 import shutil
16 import struct
17 import time
18 import uuid
19 import apex.builders.overcloud_builder as oc_builder
20 import apex.builders.common_builder as c_builder
21
22 from apex.common import constants as con
23 from apex.common.exceptions import ApexDeployException
24 from apex.common import parsers
25 from apex.common import utils
26 from apex.virtual import utils as virt_utils
27 from cryptography.hazmat.primitives import serialization as \
28     crypto_serialization
29 from cryptography.hazmat.primitives.asymmetric import rsa
30 from cryptography.hazmat.backends import default_backend as \
31     crypto_default_backend
32
33
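# How this map is consumed (see build_sdn_env_list below): keys are SDN
# controllers (or False for "no controller"), and each value is either a
# single THT env file, a nested feature map, or a (setting value, env file)
# tuple that is compared against the deploy settings.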
34 SDN_FILE_MAP = {
35     'opendaylight': {
36         'sfc': 'neutron-sfc-opendaylight.yaml',
37         'vpn': 'neutron-bgpvpn-opendaylight.yaml',
38         'gluon': 'gluon.yaml',
39         'vpp': {
40             'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
41             'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
42             'default': 'neutron-opendaylight-honeycomb.yaml'
43         },
44         'l2gw': 'neutron-l2gw-opendaylight.yaml',
45         'sriov': 'neutron-opendaylight-sriov.yaml',
46         'default': 'neutron-opendaylight.yaml',
47     },
48     'onos': {
49         'sfc': 'neutron-onos-sfc.yaml',
50         'default': 'neutron-onos.yaml'
51     },
52     'ovn': 'neutron-ml2-ovn.yaml',
53     False: {
54         'vpp': 'neutron-ml2-vpp.yaml',
55         'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
56     }
57 }
58
59 OTHER_FILE_MAP = {
60     'tacker': 'enable_tacker.yaml',
61     'congress': 'enable_congress.yaml',
62     'barometer': 'enable_barometer.yaml',
63     'rt_kvm': 'enable_rt_kvm.yaml'
64 }
65
66 OVS_PERF_MAP = {
67     'HostCpusList': 'dpdk_cores',
68     'NeutronDpdkCoreList': 'pmd_cores',
69     'NeutronDpdkSocketMemory': 'socket_memory',
70     'NeutronDpdkMemoryChannels': 'memory_channels'
71 }
72
73 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
74 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
76                       ".noarch.rpm"
77
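# Size of the sparse file backing /dev/loop3 when containerized Ceph has no
# dedicated OSD device (ceph_device == '/dev/loop3'); see the losetup
# handling in prep_image().  The systemd unit below re-attaches the loop
# device on every boot so the backing file persists across reboots.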
78 LOOP_DEVICE_SIZE = "10G"
79
80 LOSETUP_SERVICE = """[Unit]
81 Description=Setup loop devices
82 Before=network.target
83
84 [Service]
85 Type=oneshot
86 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
87 ExecStop=/sbin/losetup -d /dev/loop3
88 TimeoutSec=60
89 RemainAfterExit=yes
90
91 [Install]
92 WantedBy=multi-user.target
93 """
94
95
96 def build_sdn_env_list(ds, sdn_map, env_list=None):
97     """
98     Builds a list of SDN environment files to be used in the deploy cmd.
99
100     This function recursively searches an sdn_map.  First the SDN controller
101     is matched, and then the function looks for enabled features of that
102     controller to determine which environment files should be used.  By
103     default a feature is added to the list when it is set to true in the
104     deploy settings.  If a feature maps to a tuple instead of a boolean, the
105     tuple is treated as a (setting value, env file) pair to compare with.
106
107     :param ds: deploy settings
108     :param sdn_map: SDN map to recursively search
109     :param env_list: recursive var to hold previously found env_list
110     :return: A list of env files
111     """
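    # Illustrative call (a hedged sketch; the deploy settings shown here are
    # hypothetical, only SDN_FILE_MAP above is authoritative):
    #   build_sdn_env_list({'sdn_controller': 'opendaylight', 'sfc': True},
    #                      SDN_FILE_MAP)
    # returns the ODL default env file followed by the SFC overlay:
    #   [<THT_ENV_DIR>/neutron-opendaylight.yaml,
    #    <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]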
112     if env_list is None:
113         env_list = list()
114     for k, v in sdn_map.items():
115         if ds['sdn_controller'] == k or (k in ds and ds[k]):
116             if isinstance(v, dict):
117                 # Append default SDN env file first
118                 # The assumption is that feature-enabled SDN env files
119                 # override and do not conflict with previously set default
120                 # settings
121                 if ds['sdn_controller'] == k and 'default' in v:
122                     env_list.append(os.path.join(con.THT_ENV_DIR,
123                                                  v['default']))
124                 env_list.extend(build_sdn_env_list(ds, v))
125             # if the value is a tuple, treat it as (setting value, env file)
126             elif isinstance(v, tuple):
127                 if ds[k] == v[0]:
128                     env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
129             else:
130                 env_list.append(os.path.join(con.THT_ENV_DIR, v))
131     if len(env_list) == 0:
132         try:
133             env_list.append(os.path.join(
134                 con.THT_ENV_DIR, sdn_map['default']))
135         except KeyError:
136             logging.warning("Unable to find default file for SDN")
137
138     return env_list
139
140
141 def get_docker_sdn_file(ds_opts):
142     """
143     Returns docker env file for detected SDN
144     :param ds_opts: deploy options
145     :return: docker THT env file for an SDN
146     """
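    # Rough behavior (sketch): the plain SDN env file resolved from
    # SDN_FILE_MAP is mapped to its docker counterpart, e.g.
    # neutron-opendaylight.yaml -> <docker THT dir>/neutron-opendaylight.yaml,
    # unless VALID_DOCKER_SERVICES points that file at a renamed variant.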
147     # FIXME(trozet): We assume right now there is only one docker SDN file
148     docker_services = con.VALID_DOCKER_SERVICES
149     tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
150     sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
151     for sdn_file in sdn_env_list:
152         sdn_base = os.path.basename(sdn_file)
153         if sdn_base in docker_services:
154             if docker_services[sdn_base] is not None:
155                 return os.path.join(tht_dir,
156                                     docker_services[sdn_base])
157             else:
158                 return os.path.join(tht_dir, sdn_base)
159
160
161 def create_deploy_cmd(ds, ns, inv, tmp_dir,
162                       virtual, env_file='opnfv-environment.yaml',
163                       net_data=False):
164
165     logging.info("Creating deployment command")
166     deploy_options = ['network-environment.yaml']
167
168     ds_opts = ds['deploy_options']
169
170     if ds_opts['containers']:
171         deploy_options.append(os.path.join(con.THT_ENV_DIR,
172                                            'docker.yaml'))
173
174     if ds['global_params']['ha_enabled']:
175         if ds_opts['containers']:
176             deploy_options.append(os.path.join(con.THT_ENV_DIR,
177                                                'docker-ha.yaml'))
178         else:
179             deploy_options.append(os.path.join(con.THT_ENV_DIR,
180                                                'puppet-pacemaker.yaml'))
181
182     if env_file:
183         deploy_options.append(env_file)
184
185     if ds_opts['containers']:
186         deploy_options.append('docker-images.yaml')
187         sdn_docker_file = get_docker_sdn_file(ds_opts)
188         if sdn_docker_file:
189             deploy_options.append(sdn_docker_file)
190             deploy_options.append('sdn-images.yaml')
191     else:
192         deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
193
194     for k, v in OTHER_FILE_MAP.items():
195         if k in ds_opts and ds_opts[k]:
196             if ds_opts['containers']:
197                 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
198                                                    "{}.yaml".format(k)))
199             else:
200                 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
201
202     if ds_opts['ceph'] and 'csit' not in env_file:
203         prep_storage_env(ds, ns, virtual, tmp_dir)
204         deploy_options.append(os.path.join(con.THT_ENV_DIR,
205                                            'storage-environment.yaml'))
206     if ds_opts['sriov']:
207         prep_sriov_env(ds, tmp_dir)
208
209     # Check for 'k8s' here intentionally, as we may support other values
210     # such as openstack/openshift for 'vim' option.
211     if ds_opts['vim'] == 'k8s':
212         deploy_options.append('kubernetes-environment.yaml')
213
214     if virtual:
215         deploy_options.append('virtual-environment.yaml')
216     else:
217         deploy_options.append('baremetal-environment.yaml')
218
219     num_control, num_compute = inv.get_node_counts()
220     if num_control == 0 or num_compute == 0:
221         logging.error("Detected 0 control or compute nodes.  Control nodes: "
222                       "{}, compute nodes: {}".format(num_control, num_compute))
223         raise ApexDeployException("Invalid number of control or computes")
224     elif num_control > 1 and not ds['global_params']['ha_enabled']:
225         num_control = 1
226     if platform.machine() == 'aarch64':
227         # aarch64 deploys were not completing in the default 90 mins.
228         # Not sure whether this is related to the hardware the OOO support
229         # was developed on or to the virtualization support in CentOS.
230         # Either way it will probably improve over time as aarch64 support
231         # matures in CentOS, and the deploy time should be re-tested in
232         # the future so this multiplier can be removed.
233         con.DEPLOY_TIMEOUT *= 2
234     cmd = "openstack overcloud deploy --templates --timeout {} " \
235           .format(con.DEPLOY_TIMEOUT)
236     # build cmd env args
237     for option in deploy_options:
238         cmd += " -e {}".format(option)
239     cmd += " --ntp-server {}".format(ns['ntp'][0])
240     cmd += " --control-scale {}".format(num_control)
241     cmd += " --compute-scale {}".format(num_compute)
242     cmd += ' --control-flavor control --compute-flavor compute'
243     if net_data:
244         cmd += ' --networks-file network_data.yaml'
245     libvirt_type = 'kvm'
246     if virtual:
247         with open('/sys/module/kvm_intel/parameters/nested') as f:
248             nested_kvm = f.read().strip()
249             if nested_kvm != 'Y':
250                 libvirt_type = 'qemu'
251     cmd += ' --libvirt-type {}'.format(libvirt_type)
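    # For illustration only (not captured from a real deploy), the assembled
    # command ends up roughly as:
    #   openstack overcloud deploy --templates --timeout 90 \
    #     -e network-environment.yaml -e <sdn/feature env files...> \
    #     --ntp-server <ntp> --control-scale 1 --compute-scale 1 \
    #     --control-flavor control --compute-flavor compute --libvirt-type kvm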
252     logging.info("Deploy command set: {}".format(cmd))
253
254     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
255         fh.write(cmd)
256     return cmd
257
258
259 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
260                patches=None):
261     """
262     Locates sdn image and preps for deployment.
263     :param ds: deploy settings
264     :param ns: network settings
265     :param img: sdn image
266     :param tmp_dir: dir to store modified sdn image
267     :param root_pw: password to configure for overcloud image
268     :param docker_tag: Docker image tag for RDO version (default None)
269     :param patches: List of patches to apply to overcloud image
270     :return: None
271     """
272     # TODO(trozet): Come up with a better way to organize this logic in this
273     # function
274     logging.info("Preparing image: {} for deployment".format(img))
275     if not os.path.isfile(img):
276         logging.error("Missing SDN image {}".format(img))
277         raise ApexDeployException("Missing SDN image file: {}".format(img))
278
279     ds_opts = ds['deploy_options']
280     virt_cmds = list()
281     sdn = ds_opts['sdn_controller']
282     patched_containers = set()
283     # we need this due to rhbz #1436021
284     # fixed in systemd-219-37.el7
285     if sdn is not False:
286         logging.info("Neutron openvswitch-agent disabled")
287         virt_cmds.extend([{
288             con.VIRT_RUN_CMD:
289                 "rm -f /etc/systemd/system/multi-user.target.wants/"
290                 "neutron-openvswitch-agent.service"},
291             {
292             con.VIRT_RUN_CMD:
293                 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
294                 ".service"
295         }])
296
297     if ns.get('http_proxy', ''):
298         virt_cmds.append({
299             con.VIRT_RUN_CMD:
300                 "echo 'http_proxy={}' >> /etc/environment".format(
301                     ns['http_proxy'])})
302
303     if ns.get('https_proxy', ''):
304         virt_cmds.append({
305             con.VIRT_RUN_CMD:
306                 "echo 'https_proxy={}' >> /etc/environment".format(
307                     ns['https_proxy'])})
308
309     if ds_opts['vpn']:
310         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
311         virt_cmds.append({
312             con.VIRT_RUN_CMD:
313                 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
314                 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
315         virt_cmds.append({
316             con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
317                               "zrpcd_start.sh"})
318         virt_cmds.append({
319             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
320                               "init.d/zrpcd_start.sh' /etc/rc.local "})
321         virt_cmds.append({
322             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
323                               "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
324         logging.info("ZRPCD configured to start at boot")
325
326     dataplane = ds_opts['dataplane']
327     if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
328         logging.info("Enabling kernel modules for dpdk")
329         # file to module mapping
330         uio_types = {
331             os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
332             os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
333         }
334         for mod_file, mod in uio_types.items():
335             with open(mod_file, 'w') as fh:
336                 fh.write('#!/bin/bash\n')
337                 fh.write('exec /sbin/modprobe {}'.format(mod))
338                 # the with-block closes the file automatically
339
340             virt_cmds.extend([
341                 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
342                     mod_file)},
343                 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
344                                    "{}".format(os.path.basename(mod_file))}
345             ])
346     if root_pw:
347         pw_op = "password:{}".format(root_pw)
348         virt_cmds.append({con.VIRT_PW: pw_op})
349
350     if dataplane == 'ovs':
351         if ds_opts['sfc']:
352             virt_cmds.extend([
353                 {con.VIRT_RUN_CMD: "yum -y install "
354                                    "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
355                                    "{}".format(OVS_NSH_KMOD_RPM)},
356                 {con.VIRT_RUN_CMD: "yum downgrade -y "
357                                    "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
358                                    "{}".format(OVS_NSH_RPM)}
359             ])
360         elif sdn == 'opendaylight':
361             # FIXME(trozet) remove this after RDO is updated with fix for
362             # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
363             ovs_file = os.path.basename(con.CUSTOM_OVS)
364             ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
365             utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
366                                             targets=[ovs_file])
367             virt_cmds.extend([
368                 {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
369                                                                   ovs_file))},
370                 {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
371                     ovs_file)}
372             ])
373     if dataplane == 'fdio':
374         # Patch neutron to use the OVS external interface for the router
375         # and add the generic Linux namespace (NS) interface driver
376         virt_cmds.append(
377             {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
378                                "-p1 < neutron-patch-NSDriver.patch"})
379         if sdn is False:
380             virt_cmds.extend([
381                 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
382                 {con.VIRT_RUN_CMD: "yum install -y "
383                                    "/root/nosdn_vpp_rpms/*.rpm"}
384             ])
385
386     tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
387     shutil.copyfile(img, tmp_oc_image)
388     logging.debug("Temporary overcloud image stored as: {}".format(
389         tmp_oc_image))
390
391     undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
392         'installer_vm']['ip']
393     if sdn == 'opendaylight':
394         oc_builder.inject_opendaylight(
395             odl_version=ds_opts['odl_version'],
396             image=tmp_oc_image,
397             tmp_dir=tmp_dir,
398             uc_ip=undercloud_admin_ip,
399             os_version=ds_opts['os_version'],
400             docker_tag=docker_tag,
401         )
402         if docker_tag:
403             patched_containers = patched_containers.union({'opendaylight'})
404
405     if patches:
406         if ds_opts['os_version'] == 'master':
407             branch = ds_opts['os_version']
408         else:
409             branch = "stable/{}".format(ds_opts['os_version'])
410         logging.info('Adding patches to overcloud')
411         patched_containers = patched_containers.union(
412             c_builder.add_upstream_patches(patches,
413                                            tmp_oc_image, tmp_dir,
414                                            branch,
415                                            uc_ip=undercloud_admin_ip,
416                                            docker_tag=docker_tag))
417     # if deploying containers with Ceph and no dedicated ceph device, we
418     # need to use a persistent loop device for the Ceph OSDs
419     if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
420         tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
421         with open(tmp_losetup, 'w') as fh:
422             fh.write(LOSETUP_SERVICE)
423         virt_cmds.extend([
424             {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
425              },
426             {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
427                 .format(LOOP_DEVICE_SIZE)},
428             {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
429             {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
430         ])
431     # TODO(trozet) remove this after LP#173474 is fixed
432     dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
433     virt_cmds.append(
434         {con.VIRT_RUN_CMD: "crudini --del {} Unit "
435                            "ConditionPathExists".format(dhcp_unit)})
436     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
437     logging.info("Overcloud image customization complete")
438     return patched_containers
439
440
441 def make_ssh_key():
442     """
443     Creates a public/private SSH key pair using a 1024-bit RSA key
444     :return: private, public key
445     """
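    # Both values are returned as text: a PEM/PKCS8-encoded private key and a
    # single-line OpenSSH public key ('ssh-rsa AAAA...').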
446     key = rsa.generate_private_key(
447         backend=crypto_default_backend(),
448         public_exponent=65537,
449         key_size=1024
450     )
451
452     private_key = key.private_bytes(
453         crypto_serialization.Encoding.PEM,
454         crypto_serialization.PrivateFormat.PKCS8,
455         crypto_serialization.NoEncryption())
456     public_key = key.public_key().public_bytes(
457         crypto_serialization.Encoding.OpenSSH,
458         crypto_serialization.PublicFormat.OpenSSH
459     )
460     return private_key.decode('utf-8'), public_key.decode('utf-8')
461
462
463 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
464     """
465     Creates modified opnfv/network environments for deployment
466     :param ds: deploy settings
467     :param ns: network settings
468     :param inv: node inventory
469     :param opnfv_env: file path for opnfv-environment file
470     :param net_env: file path for network-environment file
471     :param tmp_dir: Apex tmp dir
472     :return:
473     """
474
475     logging.info("Preparing opnfv-environment and network-environment files")
476     ds_opts = ds['deploy_options']
477     tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
478     shutil.copyfile(opnfv_env, tmp_opnfv_env)
479     tenant_nic_map = ns['networks']['tenant']['nic_mapping']
480     tenant_nic = dict()
481     tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
482     tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
483     external_nic_map = ns['networks']['external'][0]['nic_mapping']
484     external_nic = dict()
485     external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
486
487     # SSH keys
488     private_key, public_key = make_ssh_key()
489
490     # Pre-compute values referenced by the file-editing loop below
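    # The expected layout is roughly (illustrative, not a schema):
    #   ds_opts['performance'] = {
    #       'Controller': {'vpp': {...}},
    #       'Compute': {'vpp': {...}, 'ovs': {...}, 'kernel': {...}}}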
491     if 'performance' in ds_opts:
492         perf = True
493         # vpp
494         if 'vpp' in ds_opts['performance']['Compute']:
495             perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
496         else:
497             perf_vpp_comp = None
498         if 'vpp' in ds_opts['performance']['Controller']:
499             perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
500         else:
501             perf_vpp_ctrl = None
502
503         # ovs
504         if 'ovs' in ds_opts['performance']['Compute']:
505             perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
506         else:
507             perf_ovs_comp = None
508
509         # kernel
510         if 'kernel' in ds_opts['performance']['Compute']:
511             perf_kern_comp = ds_opts['performance']['Compute']['kernel']
512         else:
513             perf_kern_comp = None
514     else:
515         perf = False
516
517     tenant_settings = ns['networks']['tenant']
518     tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
519         ns['networks']['tenant'].get('segmentation_type') == 'vlan'
520
521     # Modify OPNFV environment
522     # TODO: Change this to build a dict and output YAML rather than parsing
523     for line in fileinput.input(tmp_opnfv_env, inplace=True):
524         line = line.strip('\n')
525         output_line = line
526         if 'CloudDomain' in line:
527             output_line = "  CloudDomain: {}".format(ns['domain_name'])
528         elif 'replace_private_key' in line:
529             output_line = "    private_key: |\n"
530             key_out = ''
531             for line in private_key.splitlines():
532                 key_out += "      {}\n".format(line)
533             output_line += key_out
534         elif 'replace_public_key' in line:
535             output_line = "    public_key: '{}'".format(public_key)
536         elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
537                 'resource_registry' in line:
538             output_line = "resource_registry:\n" \
539                           "  OS::TripleO::NodeUserData: first-boot.yaml"
540         elif 'ComputeExtraConfigPre' in line and \
541                 ds_opts['dataplane'] == 'ovs_dpdk':
542             output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
543                           './ovs-dpdk-preconfig.yaml'
544         elif 'NeutronNetworkVLANRanges' in line:
545             vlan_setting = ''
546             if tenant_vlan_enabled:
547                 if ns['networks']['tenant']['overlay_id_range']:
548                     vlan_setting = ns['networks']['tenant']['overlay_id_range']
549                     if 'datacentre' not in vlan_setting:
550                         vlan_setting += ',datacentre:1:1000'
551             # SRIOV networks are VLAN based provider networks. In order to
552             # simplify the deployment, nfv_sriov will be the default physnet.
553             # VLANs are not needed in advance, and the user will have to create
554             # the network specifying the segmentation-id.
555             if ds_opts['sriov']:
556                 if vlan_setting:
557                     vlan_setting += ",nfv_sriov"
558                 else:
559                     vlan_setting = "datacentre:1:1000,nfv_sriov"
560             if vlan_setting:
561                 output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
562         elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
563             if tenant_settings['overlay_id_range']:
564                 physnets = tenant_settings['overlay_id_range'].split(',')
565                 output_line = "  NeutronBridgeMappings: "
566                 for physnet in physnets:
567                     physnet_name = physnet.split(':')[0]
568                     if physnet_name != 'datacentre':
569                         output_line += "{}:br-vlan,".format(physnet_name)
570                 output_line += "datacentre:br-ex"
571         elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
572                 and ds_opts['sdn_controller'] == 'opendaylight':
573             if tenant_settings['overlay_id_range']:
574                 physnets = tenant_settings['overlay_id_range'].split(',')
575                 output_line = "  OpenDaylightProviderMappings: "
576                 for physnet in physnets:
577                     physnet_name = physnet.split(':')[0]
578                     if physnet_name != 'datacentre':
579                         output_line += "{}:br-vlan,".format(physnet_name)
580                 output_line += "datacentre:br-ex"
581         elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
582             output_line = "  NeutronNetworkType: vlan\n" \
583                           "  NeutronTunnelTypes: ''"
584
585         if ds_opts['sdn_controller'] == 'opendaylight' and \
586                 'odl_vpp_routing_node' in ds_opts:
587             if 'opendaylight::vpp_routing_node' in line:
588                 output_line = ("    opendaylight::vpp_routing_node: {}.{}"
589                                .format(ds_opts['odl_vpp_routing_node'],
590                                        ns['domain_name']))
591         elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
592             if 'NeutronVPPAgentPhysnets' in line:
593                 # VPP interface tap0 will be used for external network
594                 # connectivity.
595                 output_line = ("  NeutronVPPAgentPhysnets: "
596                                "'datacentre:{},external:tap0'"
597                                .format(tenant_nic['Controller']))
598         elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
599                 'dvr') is True:
600             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
601                 output_line = ''
602             elif 'NeutronDhcpAgentsPerNetwork' in line:
603                 num_control, num_compute = inv.get_node_counts()
604                 output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
605                                .format(num_compute))
606             elif 'ComputeServices' in line:
607                 output_line = ("  ComputeServices:\n"
608                                "    - OS::TripleO::Services::NeutronDhcpAgent")
609
610         if perf:
611             for role in 'NovaCompute', 'Controller':
612                 if role == 'NovaCompute':
613                     perf_opts = perf_vpp_comp
614                 else:
615                     perf_opts = perf_vpp_ctrl
616                 cfg = "{}ExtraConfig".format(role)
617                 if cfg in line and perf_opts:
618                     perf_line = ''
619                     if 'main-core' in perf_opts:
620                         perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
621                                       .format(perf_opts['main-core']))
622                     if 'corelist-workers' in perf_opts:
623                         perf_line += ("\n    "
624                                       "fdio::vpp_cpu_corelist_workers: '{}'"
625                                       .format(perf_opts['corelist-workers']))
626                     if ds_opts['sdn_controller'] == 'opendaylight' and \
627                             ds_opts['dataplane'] == 'fdio':
628                         if role == 'NovaCompute':
629                             perf_line += ("\n    "
630                                           "tripleo::profile::base::neutron::"
631                                           "agents::honeycomb::"
632                                           "interface_role_mapping:"
633                                           " ['{}:tenant-interface',"
634                                           "'{}:public-interface']"
635                                           .format(tenant_nic[role],
636                                                   external_nic[role]))
637                         else:
638                             perf_line += ("\n    "
639                                           "tripleo::profile::base::neutron::"
640                                           "agents::honeycomb::"
641                                           "interface_role_mapping:"
642                                           " ['{}:tenant-interface']"
643                                           .format(tenant_nic[role]))
644                     if perf_line:
645                         output_line = ("  {}:{}".format(cfg, perf_line))
646
647             if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
648                 for k, v in OVS_PERF_MAP.items():
649                     if k in line and v in perf_ovs_comp:
650                         output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])
651
652             # kernel args
653             # (FIXME) use compute's kernel settings for all nodes for now.
654             if perf_kern_comp:
655                 if 'NovaSchedulerDefaultFilters' in line:
656                     output_line = \
657                         "  NovaSchedulerDefaultFilters: 'RamFilter," \
658                         "ComputeFilter,AvailabilityZoneFilter," \
659                         "ComputeCapabilitiesFilter," \
660                         "ImagePropertiesFilter,NUMATopologyFilter'"
661                 elif 'ComputeKernelArgs' in line:
662                     kernel_args = ''
663                     for k, v in perf_kern_comp.items():
664                         kernel_args += "{}={} ".format(k, v)
665                     if kernel_args:
666                         output_line = "  ComputeKernelArgs: '{}'".\
667                             format(kernel_args)
668
669         print(output_line)
670
671     logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
672
673
674 def generate_ceph_key():
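    # Builds a cephx-style secret: a little-endian header packed as (version,
    # creation time in seconds, sub-second field, key length) followed by 16
    # random bytes, with the whole blob base64-encoded.  The field meanings
    # are an interpretation of the struct layout below, not taken from Ceph
    # documentation.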
675     key = os.urandom(16)
676     header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
677     return base64.b64encode(header + key)
678
679
680 def prep_storage_env(ds, ns, virtual, tmp_dir):
681     """
682     Creates storage environment file for deployment.  Source file is copied by
683     undercloud playbook to host.
684     :param ds:
685     :param ns:
686     :param virtual:
687     :param tmp_dir:
688     :return:
689     """
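    # With containers enabled, the parameter_defaults written below end up
    # roughly as (sketch):
    #   CephPoolDefaultSize: 2
    #   CephPoolDefaultPgNum: 32
    #   CephAnsibleDisksConfig: {devices: [<ceph_device>], journal_size: 512,
    #                            osd_scenario: collocated}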
690     ds_opts = ds['deploy_options']
691     storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
692     if not os.path.isfile(storage_file):
693         logging.error("storage-environment file is not in tmp directory: {}. "
694                       "Check if file was copied from "
695                       "undercloud".format(tmp_dir))
696         raise ApexDeployException("storage-environment file not copied from "
697                                   "undercloud")
698     for line in fileinput.input(storage_file, inplace=True):
699         line = line.strip('\n')
700         if 'CephClusterFSID' in line:
701             print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
702         elif 'CephMonKey' in line:
703             print("  CephMonKey: {}".format(generate_ceph_key().decode(
704                 'utf-8')))
705         elif 'CephAdminKey' in line:
706             print("  CephAdminKey: {}".format(generate_ceph_key().decode(
707                 'utf-8')))
708         elif 'CephClientKey' in line:
709             print("  CephClientKey: {}".format(generate_ceph_key().decode(
710                 'utf-8')))
711         else:
712             print(line)
713
714     if ds_opts['containers']:
715         ceph_params = {}
716
717         # The maximum number of PGs allowed is calculated as num_mons * 200.
718         # Therefore the number of PGs and pools is set so that the total
719         # (num_pgs * num_pools * num_osds) stays below that limit.
720         ceph_params['CephPoolDefaultSize'] = 2
721         ceph_params['CephPoolDefaultPgNum'] = 32
722         if virtual:
723             ceph_params['CephAnsibleExtraConfig'] = {
724                 'centos_package_dependencies': [],
725                 'ceph_osd_docker_memory_limit': '1g',
726                 'ceph_mds_docker_memory_limit': '1g',
727             }
728         ceph_device = ds_opts['ceph_device']
729         ceph_params['CephAnsibleDisksConfig'] = {
730             'devices': [ceph_device],
731             'journal_size': 512,
732             'osd_scenario': 'collocated'
733         }
734         utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
735     # TODO(trozet): remove following block as we only support containers now
736     elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
737         with open(storage_file, 'a') as fh:
738             fh.write('  ExtraConfig:\n')
739             fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
740                 ds_opts['ceph_device']
741             ))
742
743
744 def prep_sriov_env(ds, tmp_dir):
745     """
746     Creates SRIOV environment file for deployment. Source file is copied by
747     undercloud playbook to host.
748     :param ds:
749     :param tmp_dir:
750     :return:
751     """
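    # Example of the rewritten lines (sketch; 'ens4f0' is a hypothetical
    # interface standing in for ds_opts['sriov']):
    #   NeutronPhysicalDevMappings: "nfv_sriov:ens4f0"
    #   NeutronSriovNumVFs: "ens4f0:8"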
752     ds_opts = ds['deploy_options']
753     sriov_iface = ds_opts['sriov']
754     sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
755     if not os.path.isfile(sriov_file):
756         logging.error("sriov-environment file is not in tmp directory: {}. "
757                       "Check if file was copied from "
758                       "undercloud".format(tmp_dir))
759         raise ApexDeployException("sriov-environment file not copied from "
760                                   "undercloud")
761     # TODO(rnoriega): Instead of line editing, refactor this code to load
762     # yaml file into a dict, edit it and write the file back.
763     for line in fileinput.input(sriov_file, inplace=True):
764         line = line.strip('\n')
765         if 'NovaSchedulerDefaultFilters' in line:
766             print("  {}".format(line[3:]))
767         elif 'NovaSchedulerAvailableFilters' in line:
768             print("  {}".format(line[3:]))
769         elif 'NeutronPhysicalDevMappings' in line:
770             print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
771                   .format(sriov_iface))
772         elif 'NeutronSriovNumVFs' in line:
773             print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
774         elif 'NovaPCIPassthrough' in line:
775             print("  NovaPCIPassthrough:")
776         elif 'devname' in line:
777             print("    - devname: \"{}\"".format(sriov_iface))
778         elif 'physical_network' in line:
779             print("      physical_network: \"nfv_sriov\"")
780         else:
781             print(line)
782
783
784 def external_network_cmds(ns, ds):
785     """
786     Generates external network openstack commands
787     :param ns: network settings
788     :param ds: deploy settings
789     :return: list of commands to configure external network
790     """
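    # Illustrative output (sketch; the addresses are placeholders, real values
    # come from the network settings):
    #   openstack network create external --project service --external \
    #     --provider-network-type flat --provider-physical-network datacentre
    #   openstack subnet create external-subnet --project service \
    #     --network external --no-dhcp --gateway 192.0.2.1 \
    #     --allocation-pool start=192.0.2.2,end=192.0.2.100 \
    #     --subnet-range 192.0.2.0/24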
791     ds_opts = ds['deploy_options']
792     external_physnet = 'datacentre'
793     if ds_opts['dataplane'] == 'fdio' and \
794        ds_opts['sdn_controller'] != 'opendaylight':
795         external_physnet = 'external'
796     if 'external' in ns.enabled_network_list:
797         net_config = ns['networks']['external'][0]
798         external = True
799         pool_start, pool_end = net_config['floating_ip_range']
800     else:
801         net_config = ns['networks']['admin']
802         external = False
803         pool_start, pool_end = ns['apex']['networks']['admin'][
804             'introspection_range']
805     nic_config = net_config['nic_mapping']
806     gateway = net_config['gateway']
807     cmds = list()
808     # create network command
809     if nic_config['compute']['vlan'] == 'native':
810         ext_type = 'flat'
811     else:
812         ext_type = "vlan --provider-segment {}".format(nic_config[
813                                                        'compute']['vlan'])
814     cmds.append("openstack network create external --project service "
815                 "--external --provider-network-type {} "
816                 "--provider-physical-network {}"
817                 .format(ext_type, external_physnet))
818     # create subnet command
819     cidr = net_config['cidr']
820     subnet_cmd = "openstack subnet create external-subnet --project " \
821                  "service --network external --no-dhcp --gateway {} " \
822                  "--allocation-pool start={},end={} --subnet-range " \
823                  "{}".format(gateway, pool_start, pool_end, str(cidr))
824     if external and cidr.version == 6:
825         subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
826                       '--ipv6-address-mode slaac'
827     cmds.append(subnet_cmd)
828     logging.debug("Neutron external network commands determined "
829                   "as: {}".format(cmds))
830     return cmds
831
832
833 def create_congress_cmds(overcloud_file):
834     drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
835     overcloudrc = parsers.parse_overcloudrc(overcloud_file)
836     logging.info("Creating congress commands")
837     try:
838         ds_cfg = [
839             "username={}".format(overcloudrc['OS_USERNAME']),
840             "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
841             "password={}".format(overcloudrc['OS_PASSWORD']),
842             "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
843         ]
844     except KeyError:
845         logging.error("Unable to find all keys required for congress in "
846                       "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
847                       "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
848                       "file: {}".format(overcloud_file))
849         raise
850     cmds = list()
851     ds_cfg = '--config ' + ' --config '.join(ds_cfg)
852
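    # For example (a hedged sketch), the nova driver command ends up roughly:
    #   nova "nova" --config username=<user> --config tenant_name=<project>
    #     --config password=<pw> --config auth_url=<url>
    #     --config api_version="2.34"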
853     for driver in drivers:
854         if driver == 'doctor':
855             cmd = "{} \"{}\"".format(driver, driver)
856         else:
857             cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
858         if driver == 'nova':
859             cmd += ' --config api_version="2.34"'
860         logging.debug("Congress command created: {}".format(cmd))
861         cmds.append(cmd)
862     return cmds