##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the SDN controller
    is matched, and then the function looks for enabled features of that
    controller to determine which environment files should be used.  By
    default, a feature is added to the list when it is set to true in the
    deploy settings.  If a feature does not have a boolean value, then the
    key and value pair to compare with are checked as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list

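# Illustrative example (not executed): with deploy settings where
# sdn_controller is 'opendaylight' and the 'sfc' feature is enabled,
# build_sdn_env_list(ds_opts, SDN_FILE_MAP) would return something like:
#   [os.path.join(con.THT_ENV_DIR, 'neutron-opendaylight.yaml'),
#    os.path.join(con.THT_ENV_DIR, 'neutron-sfc-opendaylight.yaml')]
# The controller's 'default' file is appended first so that feature files
# can override its settings.
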

def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list

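# Illustrative example (not executed): if build_sdn_env_list() returns the
# puppet env file .../neutron-opendaylight.yaml and that base name is listed
# in con.VALID_DOCKER_SERVICES, the entry is swapped for its containerized
# counterpart under con.THT_DOCKER_ENV_DIR (or for a renamed file when the
# map value is not None).
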

def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet): fix this check to look at whether ceph is in the
    # controller services rather than matching on the env file name
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or to the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be re-tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual and (platform.machine() != 'aarch64'):
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    elif virtual and (platform.machine() == 'aarch64'):
        libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    if platform.machine() == 'aarch64':
        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd

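# Illustrative example (not executed): for a virtual HA deploy with
# containers, the command assembled above looks roughly like:
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <THT_ENV_DIR>/docker.yaml \
#     -e <THT_ENV_DIR>/docker-ha.yaml -e opnfv-environment.yaml \
#     -e docker-images.yaml ... --ntp-server <ns ntp server> \
#     --control-scale 3 --compute-scale 2 \
#     --control-flavor control --compute-flavor compute --libvirt-type kvm
# The exact -e list depends on the deploy settings evaluated above.
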

def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Validates and customizes the overcloud (SDN) image for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of patched container names
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start on boot")

    dataplane = ds_opts['dataplane']
    if dataplane in ('ovs_dpdk', 'fdio'):
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic Linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if deploying containers with Ceph and no Ceph device is given, we
    # need to use a persistent loop device for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    # Prep for NFS
    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/nova"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers

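# Illustrative usage (not executed; paths are hypothetical): assuming ds/ns
# are parsed deploy and network settings objects,
#   patched = prep_image(ds, ns, '/home/stack/overcloud-full.qcow2',
#                        '/tmp/apex', root_pw='secret', docker_tag='latest')
# copies the image to /tmp/apex/overcloud-full.qcow2, runs the queued
# virt-customize operations against it, and returns the set of patched
# container names.
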

def make_ssh_key():
    """
    Creates an ssh key pair using 1024-bit RSA
    :return: private key, public key (as strings)
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')

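# Illustrative usage (not executed):
#   priv, pub = make_ssh_key()
# priv is a PEM/PKCS8 string beginning '-----BEGIN PRIVATE KEY-----' and
# pub is an OpenSSH public key string beginning 'ssh-rsa '.
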

def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Pull performance settings out into local variables so they are
    # easier and faster to reference in the file editing loop below
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: change this to build a dict and output yaml rather than
    # editing the file line by line
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services for a single
    # node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))

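# Illustrative example (not executed) of the line editing above: a template
# line containing 'CloudDomain', such as
#   CloudDomain: replace_me
# is rewritten in place to
#   CloudDomain: opnfvlf.org
# when ns['domain_name'] is, say, 'opnfvlf.org'. fileinput with inplace=True
# redirects print() output back into the file being edited.
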

def generate_ceph_key():
    # 16 random bytes form the actual key; the packed header mirrors Ceph's
    # key layout: type field (1 == AES), creation time (secs, nsecs), length
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)

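# Illustrative output (not executed): generate_ceph_key() returns base64
# bytes such as b'AQBYs...' which, decoded to str, are used for the
# Ceph*Key parameters written below.
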

def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum number of PGs allowed is calculated as num_mons * 200,
        # so set the PG number and pool size such that the total
        # (num_pgs * num_pools * num_osds) stays below that limit:
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds

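# Illustrative output (not executed), assuming a native-VLAN (flat) external
# network 172.30.9.0/24 with gateway 172.30.9.1:
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service \
#     --network external --no-dhcp --gateway 172.30.9.1 \
#     --allocation-pool start=172.30.9.200,end=172.30.9.220 \
#     --subnet-range 172.30.9.0/24
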

def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
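# Illustrative output (not executed): for the 'nova' driver the generated
# datasource string looks roughly like:
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=... --config auth_url=http://<vip>:5000/v3 \
#     --config api_version="2.34"
# while the 'doctor' driver gets no --config arguments.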