# apex/overcloud/deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
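"""Helpers for preparing images, environment files and commands for the
Apex overcloud deployment."""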

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


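# Maps the SDN controller and feature flags from deploy settings to the
# tripleo-heat-templates environment files consumed by build_sdn_env_list()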
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

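# Maps TripleO heat parameter names to the OVS performance option keys from
# deploy settings (see the ovs_dpdk handling in prep_env())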
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

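# systemd unit installed into the overcloud image by prep_image() to back
# the /dev/loop3 Ceph OSD device with a sparse file at boot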
LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

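# Compute services dropped from ControllerServices after compute and
# controller services are merged for all-in-one deployments (see prep_env())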
DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  The SDN controller is
    matched first, and then the enabled features for that controller
    determine which environment files are used.  By default a feature is
    added to the list when it is set to true in the deploy settings.  If a
    feature's value is not a boolean, the map entry is a (key, value) tuple
    that is compared against the deploy settings.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env files
    :return: A list of env files
    """
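    # Illustrative example (values hypothetical): with deploy settings such
    # as {'sdn_controller': 'opendaylight', 'sfc': True}, this returns the
    # default neutron-opendaylight.yaml plus neutron-sfc-opendaylight.yaml,
    # each joined onto con.THT_ENV_DIR.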
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):
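    """
    Builds the 'openstack overcloud deploy' command from the deploy and
    network settings and writes it to tmp_dir/deploy_command.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: Apex tmp dir
    :param virtual: True if deployment is virtual
    :param env_file: name of the OPNFV environment file to include
    :param net_data: True if a custom network_data.yaml should be passed
    :return: deploy command string
    """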

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
        if sdn_docker_files:
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet) Fix this check to look for whether ceph is in the
    # controller services rather than relying on the environment file name
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 minutes.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS.  It will likely improve as aarch64 support matures in
        # CentOS, so deploy times should be re-tested in the future and
        # this multiplier removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of container names that were patched
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD startup script added to rc.local")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add a generic Linux namespace interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # If deploying containers with Ceph and no dedicated Ceph device,
    # use a persistent loop device for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    # Prep for NFS
    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /nova"},
        {con.VIRT_RUN_CMD: "chmod 777 /glance"},
        {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
        {con.VIRT_RUN_CMD: "chmod 777 /nova"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
        {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a private/public SSH key pair using a 1024-bit RSA key
    :return: private key, public key (both as strings)
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Set up short-hand variables for use when editing the env file below
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change this to build a dict and output yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into controller services if this is a
    # single-node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
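    """
    Generates a cephx-style secret: a packed binary header (a constant, the
    creation time, a zero field and the key length) followed by 16 random
    bytes, base64 encoded.
    :return: base64 encoded key (bytes)
    """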
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    to the host by the undercloud playbook.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if deployment is virtual
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum number of PGs allowed is calculated as num_mons * 200,
        # so set the PG number and pool size so that the total
        # (num_pgs * num_pools * num_osds) stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied
    to the host by the undercloud playbook.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
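    """
    Builds Congress datasource configuration strings, one per driver, using
    the credentials parsed from the overcloudrc file.
    :param overcloud_file: path to overcloudrc file
    :return: list of command strings
    """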
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds