##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

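# Compute-role services that duplicate controller-side equivalents; these are
# dropped after compute services are merged into the controller service list
# for all-in-one (no compute node) deployments (see prep_env below).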
DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  The SDN controller is
    matched first, and then the function looks for enabled features of that
    controller to determine which environment files should be used.  By
    default a feature is added to the list when it is set to true in the
    deploy settings.  If a feature does not carry a simple boolean value, its
    map entry is a (value, file) tuple and the file is only added when the
    deploy setting matches that value.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
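
    Example (illustrative, minimal deploy settings shown):
        build_sdn_env_list({'sdn_controller': 'opendaylight', 'sfc': True},
                           SDN_FILE_MAP)
        # -> [<THT_ENV_DIR>/neutron-opendaylight.yaml,
        #     <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]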
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_files(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
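
    Example (illustrative): with an OpenDaylight deploy, a plain
    'neutron-opendaylight.yaml' entry returned by build_sdn_env_list() is
    swapped for its containerized equivalent under con.THT_DOCKER_ENV_DIR,
    using the alternate file name from con.VALID_DOCKER_SERVICES when one
    is defined for it.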
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

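    # The deploy command is assembled by stacking '-e <env file>' arguments in
    # the order the files are appended below.  Illustrative result for a
    # virtual, non-HA, containerized ODL deploy (paths shortened; the timeout
    # and any extra feature/storage files depend on the deploy settings):
    #   openstack overcloud deploy --templates --timeout 90 \
    #     -e network-environment.yaml -e .../docker.yaml \
    #     -e opnfv-environment.yaml -e docker-images.yaml \
    #     -e .../neutron-opendaylight.yaml -e virtual-environment.yaml \
    #     --ntp-server <ntp> --control-scale 1 --compute-scale 1 \
    #     --control-flavor control --compute-flavor compute --libvirt-type qemu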
    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet) Fix this check to look for whether ceph is in the
    # controller services rather than relying on the name of the file
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or to the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, so deploy time should be re-tested in
        # the future and this multiplier removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual and (platform.machine() != 'aarch64'):
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    elif virtual and (platform.machine() == 'aarch64'):
        libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    if platform.machine() == 'aarch64':
        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD startup script configured")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))
                fh.close()

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic Linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # If deploying containers with Ceph and no Ceph device is provided, we
    # need to use a persistent loop device for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    # Prep for NFS
    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /nova"},
        {con.VIRT_RUN_CMD: "chmod 777 /glance"},
        {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
        {con.VIRT_RUN_CMD: "chmod 777 /nova"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
        {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
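
    Example (illustrative):
        priv, pub = make_ssh_key()
        # priv is a PEM encoded PKCS8 string ('-----BEGIN PRIVATE KEY-----')
        # pub is a single OpenSSH line ('ssh-rsa AAAA...')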
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to building a dict and outputting YAML rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if this is a
    # single-node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
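    # Builds a cephx-style secret: a little-endian header (type 1 = AES,
    # creation time in seconds, nanoseconds, key length) followed by 16
    # random bytes, all base64 encoded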
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum number of PGs allowed is calculated as num_mons * 200.
        # Therefore we set the number of PGs and pools so that the total
        # (num_pgs * num_pools * num_osds) stays below that limit.
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
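
    Example result (illustrative addresses, flat external network):
        ['openstack network create external --project service --external '
         '--provider-network-type flat --provider-physical-network datacentre',
         'openstack subnet create external-subnet --project service '
         '--network external --no-dhcp --gateway 192.0.2.1 '
         '--allocation-pool start=192.0.2.100,end=192.0.2.200 '
         '--subnet-range 192.0.2.0/24']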
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
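    """
    Builds the congress datasource configuration strings for each supported
    driver, using credentials parsed from the overcloudrc file.
    :param overcloud_file: path to overcloudrc file
    :return: list of congress datasource commands

    Example element (illustrative credentials):
        'nova "nova" --config username=admin --config tenant_name=admin '
        '--config password=secret --config auth_url=http://192.0.2.1:5000/v3 '
        '--config api_version="2.34"'
    """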
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds