Disable NFS when all-in-one deployment
apex/overcloud/deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import struct
import time
import uuid
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


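# Maps each SDN controller (or False for no SDN controller) and its optional
# features to the tripleo-heat-templates environment file that enables them.
# A tuple value is matched as (expected deploy-setting value, env file); see
# build_sdn_env_list() below for the traversal logic.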
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""
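# prep_image() uploads this unit to /usr/lib/systemd/system/ when ceph_device
# is /dev/loop3, so the loop device backing the Ceph OSD survives reboots.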

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]

NFS_VARS = [
    'NovaNfsEnabled',
    'GlanceNfsEnabled',
    'CinderNfsEnabledBackend'
]
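# prep_env() forces each of these parameters to False when the deployment has
# no compute nodes (all-in-one); see the single-node handling in prep_env().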


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller
    is matched, and then the function looks for enabled features of that
    controller to determine which environment files should be used. By
    default, a feature is added to the list if it is set to True in the
    deploy settings. If a feature does not have a boolean value, then the
    key/value pair to compare against is given as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
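
    Example (illustrative; assumes a minimal deploy-options mapping and
    that these env files exist under con.THT_ENV_DIR)::

        opts = {'sdn_controller': 'opendaylight', 'sfc': True}
        build_sdn_env_list(opts, SDN_FILE_MAP)
        # -> [<THT_ENV_DIR>/neutron-opendaylight.yaml,
        #     <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]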
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_files(ds_opts):
    """
    Returns the docker env file(s) for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
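
    Example (illustrative)::

        get_docker_sdn_files({'sdn_controller': 'opendaylight'})
        # -> ['<THT_DOCKER_ENV_DIR>/neutron-opendaylight.yaml'], provided
        # that file name appears in con.VALID_DOCKER_SERVICES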
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet) Fix this check to look at whether ceph is in the
    # controller services rather than at the name of the env file
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for the 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS. Either way it will probably improve over time as the
        # aarch64 support matures in CentOS, and deploy time should be
        # re-tested in the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual and (platform.machine() != 'aarch64'):
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    elif virtual and (platform.machine() == 'aarch64'):
        libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    if platform.machine() == 'aarch64':
        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
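    # Illustrative shape of a resulting command (the exact env files and
    # scales depend on the deploy settings; paths abbreviated):
    #   openstack overcloud deploy --templates --timeout 90 \
    #     -e network-environment.yaml -e opnfv-environment.yaml ... \
    #     --ntp-server <ntp> --control-scale 1 --compute-scale 1 \
    #     --control-flavor control --compute-flavor compute \
    #     --libvirt-type kvm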
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize the logic in
    # this function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # Remove the neutron-openvswitch-agent unit files; we need this due to
    # rhbz #1436021, fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD startup script installed")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # When deploying containers with Ceph and no Ceph device is given, a
    # persistent loop device is needed for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    # Prep the image for NFS: install nfs-utils, enable nfs-server, and
    # export /glance, /cinder and /nova
    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /nova"},
        {con.VIRT_RUN_CMD: "chmod 777 /glance"},
        {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
        {con.VIRT_RUN_CMD: "chmod 777 /nova"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
        {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a public/private SSH key pair using 1024-bit RSA
    :return: private, public key
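
    Example (illustrative; note 1024-bit RSA is weak by modern
    standards)::

        priv, pub = make_ssh_key()
        # priv is a PEM-encoded PKCS8 string
        # ('-----BEGIN PRIVATE KEY-----...'); pub is a single
        # 'ssh-rsa AAAA...' line suitable for an authorized_keys file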
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
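
    For all-in-one deployments (no compute nodes) this also disables the
    NFS parameters listed in NFS_VARS and merges ComputeServices into
    ControllerServices; see the single-node handling below.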
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change this to build a dict and output YAML rather than
    # editing the file line by line
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to
            # create the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if this is a
    # single-node (all-in-one) deployment
    if num_compute == 0:
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        logging.info("All in one deployment detected")
        logging.info("Disabling NFS in env file")
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
            if param in NFS_VARS:
                param_data[param] = False
        logging.info("Checking if service merging required into "
                     "control services")
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
        else:
            logging.info("No services detected in env file, not merging "
                         "services")
        with open(tmp_opnfv_env, 'w') as fh:
            yaml.safe_dump(data, fh, default_flow_style=False)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
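    """
    Generates a CephX-style secret: a little-endian header of (version,
    timestamp, reserved, key length) followed by 16 random bytes, all
    base64-encoded. Illustrative output shape: b'AQ...'.
    """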
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if virtual deployment
    :param tmp_dir: Apex tmp dir
    :return:
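
    Illustrative effect on the env file (values are generated fresh for
    each deployment)::

        CephClusterFSID: <uuid4>
        CephMonKey: AQ...==      # CephX key from generate_ceph_key()
        CephAdminKey: AQ...==
        CephClientKey: AQ...==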
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum number of PGs allowed is num_mons * 200, so set the
        # default PG count and pool size so that
        # num_pgs * num_pools * num_osds stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
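
    Example of generated commands (illustrative; addresses are
    placeholders, each command is a single string)::

        openstack network create external --project service --external
            --provider-network-type flat
            --provider-physical-network datacentre
        openstack subnet create external-subnet --project service
            --network external --no-dhcp --gateway 192.0.2.1
            --allocation-pool start=192.0.2.10,end=192.0.2.50
            --subnet-range 192.0.2.0/24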
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)
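
    # Illustrative datasource command for the 'nova' driver (values come
    # from the parsed overcloudrc):
    #   nova "nova" --config username=admin --config tenant_name=admin
    #     --config password=... --config auth_url=http://...:5000
    #     --config api_version="2.34"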

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds