Disable NFS for all-in-one deployments
[apex.git] / apex / overcloud / deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


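# Mapping of SDN controller and feature settings to THT environment files.
# Top-level keys match the 'sdn_controller' deploy setting (False means no
# SDN); nested keys are feature flags, 'default' is the base env file, and a
# tuple value means the setting is compared as (value, env file).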
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

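# Maps OVS-DPDK THT parameters to their keys in the deploy settings
# 'performance' section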
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

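# systemd unit installed into the overcloud image so /dev/loop3 is attached
# to /srv/data.img on boot; it backs the Ceph OSDs when no dedicated
# ceph_device is available (see prep_image)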
LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

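# Compute-side services that are dropped when compute services are merged
# into controller services for all-in-one deployments, as they duplicate or
# conflict with their controller-side counterparts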
DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]

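# NFS-related THT parameters that are forced off for all-in-one deployments,
# where there is no separate compute node to share storage with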
NFS_VARS = [
    'NovaNfsEnabled',
    'GlanceNfsEnabled',
    'CinderNfsEnabledBackend'
]


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the SDN controller
    is matched, and then the function looks for enabled features of that
    controller to determine which environment files should be used.  By
    default a feature is added to the list if it is set to true in the
    deploy settings.  If a setting's value is not a boolean, the map entry
    is a (value, env file) tuple: the env file is added when the deploy
    setting equals the value.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env files
    :return: A list of env files
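
    Example (illustrative): with deploy options {'sdn_controller':
    'opendaylight', 'sfc': True}, this returns the default ODL env file
    (neutron-opendaylight.yaml) followed by the SFC overlay
    (neutron-sfc-opendaylight.yaml).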
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
        if sdn_docker_files:
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet): fix this check to test whether ceph is in the controller
    # services rather than relying on the name of the env file
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 minutes.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS.  Either way it will probably improve over time as aarch64
        # support matures in CentOS, so deploy time should be re-tested in
        # the future and this multiplier removed.
        con.DEPLOY_TIMEOUT *= 2
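    # Example of the final command (illustrative values):
    #   openstack overcloud deploy --templates --timeout 90 \
    #     -e network-environment.yaml -e <sdn env files> ... \
    #     --ntp-server pool.ntp.org --control-scale 1 --compute-scale 1 \
    #     --control-flavor control --compute-flavor compute --libvirt-type kvm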
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of patched container names
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start at boot")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            # the with block closes the file; no explicit close() needed
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron to use an OVS external interface for the router
        # and add the generic Linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # If deploying containers with Ceph and no dedicated Ceph device, we
    # need to use a persistent loop device for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    # Prep for NFS
    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /nova"},
        {con.VIRT_RUN_CMD: "chmod 777 /glance"},
        {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
        {con.VIRT_RUN_CMD: "chmod 777 /nova"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
        {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a public/private SSH key pair using a 1024-bit RSA key
    :return: private key, public key (as strings)
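
    Note: the generated pair fills the 'replace_private_key' and
    'replace_public_key' placeholders in the opnfv environment file (see
    prep_env).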
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Set up convenience variables for the file editing below
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change this to build a dict and output yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into controller services if this is a
    # single-node (all-in-one) deployment
    if num_compute == 0:
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        logging.info("All in one deployment detected")
        logging.info("Disabling NFS in env file")
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
            if param in NFS_VARS:
                param_data[param] = False
        logging.info("Checking if service merging required into "
                     "control services")
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
        else:
            logging.info("No services detected in env file, not merging "
                         "services")
        with open(tmp_opnfv_env, 'w') as fh:
            yaml.safe_dump(data, fh, default_flow_style=False)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
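    # CephX secret format (assumed here): a little-endian header of
    # (type=1 for AES, creation time in seconds, nanoseconds=0, key length)
    # followed by 16 random key bytes, all base64-encoded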
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # Ceph limits the number of PGs per OSD (mon_max_pg_per_osd, 200 by
        # default), so set the pool replica size and default PG count such
        # that num_pools * num_pgs * size per OSD stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
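    # Note: several settings appear to ship commented out in the source
    # template; printing line[3:] with a two-space indent is presumably
    # meant to uncomment and re-indent them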
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
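    # Illustrative example (with made-up addresses) of the two commands
    # generated below:
    #   openstack network create external --project service --external \
    #     --provider-network-type flat --provider-physical-network datacentre
    #   openstack subnet create external-subnet --project service \
    #     --network external --no-dhcp --gateway 192.0.2.1 \
    #     --allocation-pool start=192.0.2.10,end=192.0.2.99 \
    #     --subnet-range 192.0.2.0/24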
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

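    # Each command takes the form '<driver> "<name>" --config key=value ...';
    # the doctor driver takes no datasource config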
    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds