# apex/overcloud/deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]

NFS_VARS = [
    'NovaNfsEnabled',
    'GlanceNfsEnabled',
    'CinderNfsEnabledBackend'
]


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches the sdn_map.  The SDN controller is
    matched first, and then the map is searched for features enabled for
    that controller to determine which environment files should be used.
    By default a feature is added to the list if it is set to true in the
    deploy settings.  If a feature's value in the map is not a plain env
    file, it is treated as a (key, value) tuple and compared against the
    corresponding deploy setting.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # if the value is a (setting, env file) tuple, compare the
            # deploy setting against the tuple's first element
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
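
# Illustrative resolution of SDN_FILE_MAP above (a sketch, not executed
# here). Assuming a minimal deploy-settings dict with OpenDaylight and SFC
# enabled:
#
#   >>> ds = {'sdn_controller': 'opendaylight', 'sfc': True}
#   >>> build_sdn_env_list(ds, SDN_FILE_MAP)
#   [os.path.join(con.THT_ENV_DIR, 'neutron-opendaylight.yaml'),
#    os.path.join(con.THT_ENV_DIR, 'neutron-sfc-opendaylight.yaml')]
#
# The controller's 'default' file is appended first, then one file per
# enabled feature found during the recursive search.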


def get_docker_sdn_files(ds_opts):
    """
    Returns the docker THT env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list
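
# Sketch of the rewrite above: if con.VALID_DOCKER_SERVICES were to contain
# {'neutron-opendaylight.yaml': None}, the standard path
# <THT_ENV_DIR>/neutron-opendaylight.yaml would be swapped for
# <THT_DOCKER_ENV_DIR>/neutron-opendaylight.yaml, while a non-None mapping
# value substitutes a differently named docker env file. The mapping
# contents shown here are assumptions for illustration only.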


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet) Fix this check to look for whether ceph is in the
    # controller services rather than relying on the env file name
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing within the default 90 minutes.
        # It is unclear whether this is related to the hardware the TripleO
        # support was developed on or to the virtualization support in
        # CentOS.  Either way it should improve over time as aarch64 support
        # matures in CentOS, so deploy time should be re-tested in the
        # future and this multiplier removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual and (platform.machine() != 'aarch64'):
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    elif virtual and (platform.machine() == 'aarch64'):
        libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    if platform.machine() == 'aarch64':
        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
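
# A representative command assembled above might look like the following
# (illustrative only; the actual -e files and timeout depend entirely on
# the deploy settings):
#
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml \
#     -e <THT_ENV_DIR>/neutron-opendaylight.yaml \
#     -e opnfv-environment.yaml \
#     --ntp-server pool.ntp.org --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute --libvirt-type kvm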


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of container names that were patched
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # remove the neutron-openvswitch-agent service due to rhbz #1436021
    # (fixed in systemd-219-37.el7)
    if sdn is not False:
        logging.info("Disabling neutron openvswitch-agent service")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start at boot")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            # the with block closes the file; no explicit close() is needed
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        # FIXME(trozet) remove this after RDO is updated with fix for
        # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
        # https://review.rdoproject.org/r/#/c/13839/
        oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if deploying containers with Ceph and no Ceph device is given, we
    # need a persistent loop device for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    # Prep for NFS
    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /nova"},
        {con.VIRT_RUN_CMD: "chmod 777 /glance"},
        {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
        {con.VIRT_RUN_CMD: "chmod 777 /nova"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
        {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers
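
# Each virt_cmds entry above is a single-key dict mapping an operation
# constant to its argument, e.g. {con.VIRT_RUN_CMD: "exportfs -avr"}.
# Presumably virt_utils.virt_customize translates these into virt-customize
# options such as --run-command, --upload, --install and --root-password;
# that mapping is an assumption here, not something this module defines.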


def make_ssh_key():
    """
    Creates a public/private ssh key pair using 1024-bit RSA
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
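
# Illustrative usage (key material varies per call):
#
#   >>> priv, pub = make_ssh_key()
#   >>> priv.splitlines()[0]
#   '-----BEGIN PRIVATE KEY-----'   # PKCS8 PEM encoding
#   >>> pub.split()[0]
#   'ssh-rsa'                       # OpenSSH public key format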


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Pull performance settings into short variables for the file-editing
    # loop below
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        tenant_settings.get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to building a dict and outputting yaml rather than
    # parsing and editing lines
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            # use a separate loop variable so the outer 'line' is not
            # clobbered by the key contents
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if this is a
    # single-node deployment
    if num_compute == 0:
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        logging.info("All in one deployment detected")
        logging.info("Disabling NFS in env file")
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
            if param in NFS_VARS:
                param_data[param] = False
        logging.info("Checking if service merging required into "
                     "control services")
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
        else:
            logging.info("No services detected in env file, not merging "
                         "services")
        with open(tmp_opnfv_env, 'w') as fh:
            yaml.safe_dump(data, fh, default_flow_style=False)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))
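
# Sketch of a single line edit performed by the loop above (illustrative
# input/output; 'opnfv.example.com' is an assumed domain value):
#
#   line read from opnfv-environment.yaml:
#     "  CloudDomain: replace_me"
#   line printed back to the file (fileinput inplace=True redirects stdout):
#     "  CloudDomain: opnfv.example.com"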


def generate_ceph_key():
    # Build a Ceph-style secret: a packed header of key type (1 = AES),
    # creation time (seconds, nanoseconds) and key length, followed by
    # 16 random key bytes, all base64 encoded
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
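
# Illustrative check of the key layout (a sketch, not executed here):
#
#   >>> raw = base64.b64decode(generate_ceph_key())
#   >>> struct.unpack('<hiih', raw[:12])
#   (1, <creation time in epoch seconds>, 0, 16)
#   >>> len(raw)    # 12 header bytes + 16 key bytes
#   28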


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum number of PGs allowed is calculated as num_mons * 200,
        # so set the default pool size and PG count such that the total
        # (num_pgs * num_pools * num_osds) stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
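
# The parameter_defaults merged above would render roughly as the following
# YAML (illustrative for a virtual deploy with ceph_device '/dev/loop3'):
#
#   parameter_defaults:
#     CephPoolDefaultSize: 2
#     CephPoolDefaultPgNum: 32
#     CephAnsibleExtraConfig:
#       centos_package_dependencies: []
#       ceph_osd_docker_memory_limit: 1g
#       ceph_mds_docker_memory_limit: 1g
#     CephAnsibleDisksConfig:
#       devices:
#         - /dev/loop3
#       journal_size: 512
#       osd_scenario: collocated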


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)
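
# With sriov_iface set to e.g. 'ens4f0' (an assumed interface name), the
# edited file would contain lines like:
#
#   NeutronPhysicalDevMappings: "nfv_sriov:ens4f0"
#   NeutronSriovNumVFs: "ens4f0:8"
#   NovaPCIPassthrough:
#     - devname: "ens4f0"
#       physical_network: "nfv_sriov"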


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
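
# For a flat external network the generated commands would look like the
# following (all addresses are assumed example values):
#
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service \
#     --network external --no-dhcp --gateway 192.0.2.1 \
#     --allocation-pool start=192.0.2.100,end=192.0.2.200 \
#     --subnet-range 192.0.2.0/24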


def create_congress_cmds(overcloud_file):
    """
    Generates congress datasource commands from an overcloudrc file
    :param overcloud_file: path to overcloudrc file
    :return: list of congress datasource create arguments
    """
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
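
# Illustrative entry produced for the 'nova' driver (credential values are
# assumed examples parsed from overcloudrc):
#
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=secret --config auth_url=http://192.0.2.1:5000/v3 \
#     --config api_version="2.34"
#
# The 'doctor' driver entry carries no --config arguments; each entry is
# presumably appended to an 'openstack congress datasource create' call by
# the caller.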