##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


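# Maps an SDN controller (or a standalone feature key when no SDN controller
# is set) to the tripleo-heat-templates environment file(s) that enable it.
# Nested dicts are resolved recursively by build_sdn_env_list() below.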
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

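# Maps OVS-DPDK THT parameters to the corresponding keys under the Compute
# 'ovs' performance settings in the deploy settings (see prep_env below).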
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

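# systemd unit installed into the overcloud image by prep_image(); it attaches
# /srv/data.img to /dev/loop3 at boot so the loop device can serve as a
# persistent Ceph OSD device when no dedicated ceph_device is available.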
LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default a feature is added to the list when it is set to true in the
    deploy settings. If a feature maps to a tuple instead of a boolean, the
    tuple is treated as a (value, env_file) pair and the env file is only
    added when the deploy setting for that key equals the value.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
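    # Example (illustrative): with deploy options
    #   {'sdn_controller': 'opendaylight', 'sfc': True}
    # this resolves to
    #   [<THT_ENV_DIR>/neutron-opendaylight.yaml,
    #    <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]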
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_files(ds_opts):
    """
    Returns the docker THT env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet) Fix this check to look for if ceph is in controller services
    # and not use name of the file
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or to the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be re-tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
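    # The assembled command ends up looking roughly like (illustrative):
    #   openstack overcloud deploy --templates --timeout 90 \
    #     -e network-environment.yaml -e <THT env files ...> \
    #     --ntp-server <ntp> --control-scale <N> --compute-scale <M> \
    #     --control-flavor control --compute-flavor compute \
    #     --libvirt-type kvm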
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual and (platform.machine() != 'aarch64'):
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    elif virtual and (platform.machine() == 'aarch64'):
        libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    if platform.machine() == 'aarch64':
        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates SDN image and preps it for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of container names patched during image prep
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
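    # virt_cmds collects single-entry dicts that are later executed against
    # the image by virt_utils.virt_customize(), e.g. (illustrative):
    #   {con.VIRT_RUN_CMD: 'yum install -y <pkg>'}  - run a command in image
    #   {con.VIRT_UPLOAD: '/tmp/foo:/root/'}        - upload a file into image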
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic Linux namespace interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # If deploying containers with Ceph and no dedicated Ceph device, use a
    # persistent loop device for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    # Prep for NFS
    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/nova"},
        {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/glance"},
        {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/cinder"},
        {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/nova"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
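    # Illustrative usage:
    #   priv, pub = make_ssh_key()
    #   pub is an OpenSSH-formatted key string, e.g. 'ssh-rsa AAAA...'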
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Set up convenience variables for the file-editing loop below
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change this to build a dict and output YAML rather than parsing
    # lines
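    # fileinput with inplace=True redirects stdout into the file being edited,
    # so each print(output_line) below rewrites the corresponding line of the
    # copied opnfv-environment file.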
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if this is a single
    # node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
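    # Pack a little-endian header (1, current time, 0, key length) in front of
    # 16 random bytes and base64-encode the result; the value is consumed as
    # the CephMonKey/CephAdminKey/CephClientKey in prep_storage_env() below.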
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum number of PGs allowed is calculated as num_mons * 200,
        # so set the number of PGs and pools such that the total
        # (num_pgs * num_pools * num_osds) stays below that limit.
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
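    # Illustrative output for a flat external network:
    #   openstack network create external --project service --external
    #       --provider-network-type flat --provider-physical-network datacentre
    #   openstack subnet create external-subnet --project service
    #       --network external --no-dhcp --gateway <gw>
    #       --allocation-pool start=<start>,end=<end> --subnet-range <cidr>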
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

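    # Each driver becomes a congress datasource argument string, e.g.
    # (illustrative):
    #   nova "nova" --config username=admin --config tenant_name=admin
    #       --config password=<pw> --config auth_url=<url>
    #       --config api_version="2.34"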
    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds