##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


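# Map of SDN controller (or feature flag) to THT environment file(s).
# build_sdn_env_list() below walks this map recursively; for example
# (illustrative), deploy settings with sdn_controller 'opendaylight' and
# sfc enabled resolve to neutron-opendaylight.yaml plus
# neutron-sfc-opendaylight.yaml.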
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

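# Maps TripleO heat template parameter names to the keys used under the
# performance -> Compute -> ovs section of deploy settings; consumed by
# prep_env() when the dataplane is ovs_dpdk.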
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

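# systemd unit uploaded into the overcloud image by prep_image() when Ceph
# OSDs must live on a loop device (ceph_device == /dev/loop3); the backing
# file is created with 'truncate' at image customization time.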
LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

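# Compute-role duplicates of services that the controller already runs;
# prep_env() strips these from the merged service list for all-in-one
# (zero compute node) deployments.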
DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller
    is matched, and then the function looks for enabled features of that
    controller to determine which environment files should be used. By
    default a feature is added to the list if it is set to true in the
    deploy settings. If a feature's value in the map is not a file name but
    a tuple, it is treated as (expected setting value, env file) and the
    deploy setting is compared against the first element.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
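
    Example (illustrative, minimal deploy settings):
        ds = {'sdn_controller': 'opendaylight', 'sfc': True}
        build_sdn_env_list(ds, SDN_FILE_MAP)
        # -> [os.path.join(con.THT_ENV_DIR, 'neutron-opendaylight.yaml'),
        #     os.path.join(con.THT_ENV_DIR, 'neutron-sfc-opendaylight.yaml')]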
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_files(ds_opts):
    """
    Returns the docker THT env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 minutes.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS. Either way it will probably improve over time as the
        # aarch64 support in CentOS matures, so deploy time should be
        # re-tested in the future and this multiplier removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual and (platform.machine() != 'aarch64'):
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    elif virtual and (platform.machine() == 'aarch64'):
        libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    if platform.machine() == 'aarch64':
        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
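
# A representative command produced above (illustrative; the exact -e files
# depend on deploy settings):
#   openstack overcloud deploy --templates --timeout 90
#     -e network-environment.yaml -e docker-images.yaml ...
#     --ntp-server 0.pool.ntp.org --control-scale 1 --compute-scale 1
#     --control-flavor control --compute-flavor compute --libvirt-type qemu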


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start at boot")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch Neutron to use the OVS external interface for the router
        # and add a generic Linux namespace interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # If deploying containers with Ceph and no Ceph device is given, we
    # need a persistent loop device for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a public/private ssh key pair using 1024-bit RSA
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
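
# Illustrative use (prep_env() below injects these keys into the opnfv
# environment file):
#   priv, pub = make_ssh_key()
#   # priv is a PKCS8 PEM block ('-----BEGIN PRIVATE KEY-----' ...)
#   # pub is a single OpenSSH line ('ssh-rsa AAAA...')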


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Set up convenience variables for the file editing below
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: change this to build a dict and output YAML rather than
    # parsing lines
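    # Note: with inplace=True, fileinput redirects stdout into the file
    # being edited, so the print(output_line) at the bottom of this loop
    # rewrites each line in place.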
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to
            # create the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into controller services if this is a
    # single-node (all-in-one) deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
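    # Build a cephx-style secret: a little-endian header of (key type,
    # creation time in seconds, nanoseconds, key length) followed by 16
    # random bytes, base64-encoded as it would appear in a Ceph keyring.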
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum number of placement groups allowed is calculated as
        # num_mons * 200, so set the PG count and pool size such that the
        # total (num_pgs * num_pools * num_osds) stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
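        # The line[3:] slicing below appears to strip a three-character
        # commented-out prefix from the stock template before re-emitting
        # the parameter as active (an assumption about the source file's
        # comment style).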
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
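
# Illustrative output for a flat external network (addresses are examples):
#   openstack network create external --project service --external
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service --network
#     external --no-dhcp --gateway 192.0.2.1
#     --allocation-pool start=192.0.2.10,end=192.0.2.50
#     --subnet-range 192.0.2.0/24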


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
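
# Illustrative command produced above for the nova driver (values come from
# the parsed overcloudrc):
#   nova "nova" --config username=admin --config tenant_name=admin
#     --config password=secret --config auth_url=http://192.0.2.1:5000
#     --config api_version="2.34"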