Fix NFS issues with Nova
[apex.git] / apex / overcloud / deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""
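# LOSETUP_SERVICE is uploaded into the overcloud image by prep_image() below
# and paired with a truncated /srv/data.img, so /dev/loop3 can back the Ceph
# OSDs when no dedicated Ceph device is available.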

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]
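# Compute services that duplicate their controller counterparts; these are
# dropped when ComputeServices is merged into ControllerServices for
# all-in-one deployments (see prep_env() below).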


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the sdn controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used.  By
    default a feature is added to the list if it is set to true in the
    deploy settings.  If a feature's value is not a boolean, the key and
    value pair to compare against are given as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list

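# A hypothetical illustration of how build_sdn_env_list() resolves the map
# above (example settings only, not a real deploy config):
#   ds = {'sdn_controller': 'opendaylight', 'sfc': True}
#   build_sdn_env_list(ds, SDN_FILE_MAP)
#   => [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#       <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]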

def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list

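# A sketch of the rewrite above (assuming VALID_DOCKER_SERVICES maps a base
# filename either to a docker-specific name, or to None to keep the name):
#   VALID_DOCKER_SERVICES = {'neutron-opendaylight.yaml': None, ...}
#   <THT_ENV_DIR>/neutron-opendaylight.yaml
#     -> <THT_DOCKER_ENV_DIR[os_version]>/neutron-opendaylight.yaml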

def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
        if sdn_docker_files:
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet) Fix this check to look for whether ceph is in controller
    # services rather than using the name of the file
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for the 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 minutes.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS.  Either way it will likely improve over time as the
        # aarch64 support matures in CentOS, so deploy time should be
        # re-tested in the future and this multiplier removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd

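# The assembled command ends up roughly of the following form (values are
# illustrative only):
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <env files...> \
#     --ntp-server <ntp> --control-scale N --compute-scale M \
#     --control-flavor control --compute-flavor compute --libvirt-type kvm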

def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates the SDN image and preps it for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize the logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD startup configured in rc.local")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if deploying containers with Ceph and no Ceph device is given, we
    # need to use a persistent loop device for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    # Prep for NFS
    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /nova"},
        {con.VIRT_RUN_CMD: "chmod 777 /glance"},
        {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
        {con.VIRT_RUN_CMD: "chmod 777 /nova"},
        {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
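    # After the commands above run, /etc/exports in the image contains:
    #   /glance *(rw,sync,no_root_squash,no_acl)
    #   /cinder *(rw,sync,no_root_squash,no_acl)
    #   /nova *(rw,sync,no_root_squash,no_acl)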
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a public/private SSH key pair using 1024-bit RSA
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')

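# Usage sketch: the keys come back PEM/OpenSSH encoded, e.g.:
#   private_key, public_key = make_ssh_key()
#   private_key.startswith('-----BEGIN PRIVATE KEY-----')  # True
#   public_key.startswith('ssh-rsa ')                      # True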

def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Convenience variables for the file edits below
    perf = 'performance' in ds_opts
    if perf:
        perf_opts_all = ds_opts['performance']
        # vpp
        perf_vpp_comp = perf_opts_all['Compute'].get('vpp')
        perf_vpp_ctrl = perf_opts_all['Controller'].get('vpp')
        # ovs
        perf_ovs_comp = perf_opts_all['Compute'].get('ovs')
        # kernel
        perf_kern_comp = perf_opts_all['Compute'].get('kernel')
    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to build a dict and output yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to
            # create the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if this is a single
    # node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "into control services is required")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is: {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
    key = os.urandom(16)
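    # The packed header below appears to follow Ceph's binary key format
    # (an assumption based on the field layout): little-endian key type
    # (1 = AES), creation time as (seconds, nanoseconds), and the secret
    # length, followed by the 16-byte secret itself.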
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if virtual deployment
    :param tmp_dir: Apex tmp dir
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The max number of PGs allowed is calculated as num_mons * 200,
        # so set the number of PGs and pools such that the total
        # (num_pgs * num_pools * num_osds) stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # the yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds

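# For a flat external network the generated commands look roughly like
# (addresses are illustrative only):
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service \
#     --network external --no-dhcp --gateway 172.30.9.1 \
#     --allocation-pool start=172.30.9.2,end=172.30.9.100 \
#     --subnet-range 172.30.9.0/24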

def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone',
               'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
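
# A sketch of the resulting congress datasource arguments (credentials and
# auth URL are placeholders):
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=*** --config auth_url=http://<keystone>:5000/v3 \
#     --config api_version="2.34"
#   doctor "doctor"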