Add support for kubernetes deployment
[apex.git] / apex / overcloud / deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import platform
15 import shutil
16 import uuid
17 import struct
18 import time
19 import apex.builders.overcloud_builder as oc_builder
20 import apex.builders.common_builder as c_builder
21
22 from apex.common import constants as con
23 from apex.common.exceptions import ApexDeployException
24 from apex.common import parsers
25 from apex.common import utils
26 from apex.virtual import utils as virt_utils
27 from cryptography.hazmat.primitives import serialization as \
28     crypto_serialization
29 from cryptography.hazmat.primitives.asymmetric import rsa
30 from cryptography.hazmat.backends import default_backend as \
31     crypto_default_backend
32
33
34 SDN_FILE_MAP = {
35     'opendaylight': {
36         'sfc': 'neutron-sfc-opendaylight.yaml',
37         'vpn': 'neutron-bgpvpn-opendaylight.yaml',
38         'gluon': 'gluon.yaml',
39         'vpp': {
40             'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
41             'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
42             'default': 'neutron-opendaylight-honeycomb.yaml'
43         },
44         'l2gw': 'neutron-l2gw-opendaylight.yaml',
45         'sriov': 'neutron-opendaylight-sriov.yaml',
46         'default': 'neutron-opendaylight.yaml',
47     },
48     'onos': {
49         'sfc': 'neutron-onos-sfc.yaml',
50         'default': 'neutron-onos.yaml'
51     },
52     'ovn': 'neutron-ml2-ovn.yaml',
53     False: {
54         'vpp': 'neutron-ml2-vpp.yaml',
55         'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
56     }
57 }
58
59 OTHER_FILE_MAP = {
60     'tacker': 'enable_tacker.yaml',
61     'congress': 'enable_congress.yaml',
62     'barometer': 'enable_barometer.yaml',
63     'rt_kvm': 'enable_rt_kvm.yaml'
64 }
65
66 OVS_PERF_MAP = {
67     'HostCpusList': 'dpdk_cores',
68     'NeutronDpdkCoreList': 'pmd_cores',
69     'NeutronDpdkSocketMemory': 'socket_memory',
70     'NeutronDpdkMemoryChannels': 'memory_channels'
71 }
72
73 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
74 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
76                       ".noarch.rpm"
77
78 LOSETUP_SERVICE = """[Unit]
79 Description=Setup loop devices
80 Before=network.target
81
82 [Service]
83 Type=oneshot
84 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
85 ExecStop=/sbin/losetup -d /dev/loop3
86 TimeoutSec=60
87 RemainAfterExit=yes
88
89 [Install]
90 WantedBy=multi-user.target
91 """
92
93
94 def build_sdn_env_list(ds, sdn_map, env_list=None):
95     """
96     Builds a list of SDN environment files to be used in the deploy cmd.
97
98     This function recursively searches an sdn_map.  First the sdn controller is
99     matched and then the function looks for enabled features for that
100     controller to determine which environment files should be used.  By
101     default, a feature is added to the list if it is set to true in the
102     deploy settings.  If a feature does not have a boolean value, the key
103     and value pair to compare against is given as a tuple (k, v).
104
105     :param ds: deploy settings
106     :param sdn_map: SDN map to recursively search
107     :param env_list: recursive var to hold previously found env_list
108     :return: A list of env files
109     """
110     if env_list is None:
111         env_list = list()
112     for k, v in sdn_map.items():
113         if ds['sdn_controller'] == k or (k in ds and ds[k]):
114             if isinstance(v, dict):
115                 # Append default SDN env file first
116                 # The assumption is that feature-enabled SDN env files
117                 # override and do not conflict with previously set default
118                 # settings
119                 if ds['sdn_controller'] == k and 'default' in v:
120                     env_list.append(os.path.join(con.THT_ENV_DIR,
121                                                  v['default']))
122                 env_list.extend(build_sdn_env_list(ds, v))
123             # check if the value is not a boolean
124             elif isinstance(v, tuple):
125                 if ds[k] == v[0]:
126                     env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
127             else:
128                 env_list.append(os.path.join(con.THT_ENV_DIR, v))
129     if len(env_list) == 0:
130         try:
131             env_list.append(os.path.join(
132                 con.THT_ENV_DIR, sdn_map['default']))
133         except KeyError:
134             logging.warning("Unable to find default file for SDN")
135
136     return env_list
137
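# Illustrative usage sketch (hypothetical deploy settings): resolving env
# files for an OpenDaylight deployment with SFC enabled.  Only the keys shown
# are assumed; a real run passes ds['deploy_options'] from the parsed deploy
# settings file.
def _example_build_sdn_env_list():
    ds = {'sdn_controller': 'opendaylight', 'sfc': True, 'vpn': False}
    # Expected result with SDN_FILE_MAP above: the ODL default env file
    # followed by the SFC feature file, both under con.THT_ENV_DIR, e.g.
    # [.../neutron-opendaylight.yaml, .../neutron-sfc-opendaylight.yaml]
    return build_sdn_env_list(ds, SDN_FILE_MAP)
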
138
139 def get_docker_sdn_file(ds_opts):
140     """
141     Returns docker env file for detected SDN
142     :param ds_opts: deploy options
143     :return: docker THT env file for an SDN
144     """
145     # FIXME(trozet): We assume right now there is only one docker SDN file
146     docker_services = con.VALID_DOCKER_SERVICES
147     tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
148     sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
149     for sdn_file in sdn_env_list:
150         sdn_base = os.path.basename(sdn_file)
151         if sdn_base in docker_services:
152             if docker_services[sdn_base] is not None:
153                 return os.path.join(tht_dir,
154                                     docker_services[sdn_base])
155             else:
156                 return os.path.join(tht_dir, sdn_base)
157
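# Illustrative sketch (assumed values): mapping the detected SDN env file to
# its containerized counterpart.  'queens' is only an example key and must
# exist in con.THT_DOCKER_ENV_DIR; the returned path also depends on
# con.VALID_DOCKER_SERVICES.
def _example_get_docker_sdn_file():
    ds_opts = {'sdn_controller': 'opendaylight', 'os_version': 'queens'}
    return get_docker_sdn_file(ds_opts)
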
158
159 def create_deploy_cmd(ds, ns, inv, tmp_dir,
160                       virtual, env_file='opnfv-environment.yaml',
161                       net_data=False):
162
163     logging.info("Creating deployment command")
164     deploy_options = ['network-environment.yaml']
165
166     ds_opts = ds['deploy_options']
167
168     if ds_opts['containers']:
169         deploy_options.append(os.path.join(con.THT_ENV_DIR,
170                                            'docker.yaml'))
171
172     if ds['global_params']['ha_enabled']:
173         if ds_opts['containers']:
174             deploy_options.append(os.path.join(con.THT_ENV_DIR,
175                                                'docker-ha.yaml'))
176         else:
177             deploy_options.append(os.path.join(con.THT_ENV_DIR,
178                                                'puppet-pacemaker.yaml'))
179
180     if env_file:
181         deploy_options.append(env_file)
182
183     if ds_opts['containers']:
184         deploy_options.append('docker-images.yaml')
185         sdn_docker_file = get_docker_sdn_file(ds_opts)
186         if sdn_docker_file:
187             deploy_options.append(sdn_docker_file)
188             deploy_options.append('sdn-images.yaml')
189     else:
190         deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
191
192     for k, v in OTHER_FILE_MAP.items():
193         if k in ds_opts and ds_opts[k]:
194             if ds_opts['containers']:
195                 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR[
196                     ds_opts['os_version']], "{}.yaml".format(k)))
197             else:
198                 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
199
200     if ds_opts['ceph'] and 'csit' not in env_file:
201         prep_storage_env(ds, ns, virtual, tmp_dir)
202         deploy_options.append(os.path.join(con.THT_ENV_DIR,
203                                            'storage-environment.yaml'))
204     if ds_opts['sriov']:
205         prep_sriov_env(ds, tmp_dir)
206
207     # Check for 'k8s' here intentionally, as we may support other values
208     # such as openstack/openshift for the 'vim' option.
209     if ds_opts['vim'] == 'k8s':
210         deploy_options.append('kubernetes-environment.yaml')
211
212     if virtual:
213         deploy_options.append('virtual-environment.yaml')
214     else:
215         deploy_options.append('baremetal-environment.yaml')
216
217     num_control, num_compute = inv.get_node_counts()
218     if num_control == 0 or num_compute == 0:
219         logging.error("Detected 0 control or compute nodes.  Control nodes: "
220                       "{}, compute nodes: {}".format(num_control, num_compute))
221         raise ApexDeployException("Invalid number of control or computes")
222     elif num_control > 1 and not ds['global_params']['ha_enabled']:
223         num_control = 1
224     if platform.machine() == 'aarch64':
225         # aarch64 deploys were not completing in the default 90 mins.
226         # Not sure if this is related to the hardware the OOO support
227         # was developed on or the virtualization support in CentOS.
228         # Either way it will probably get better over time as the aarch64
229         # support matures in CentOS, and deploy time should be tested in
230         # the future so this multiplier can be removed.
231         con.DEPLOY_TIMEOUT *= 2
232     cmd = "openstack overcloud deploy --templates --timeout {} " \
233           .format(con.DEPLOY_TIMEOUT)
234     # build cmd env args
235     for option in deploy_options:
236         cmd += " -e {}".format(option)
237     cmd += " --ntp-server {}".format(ns['ntp'][0])
238     cmd += " --control-scale {}".format(num_control)
239     cmd += " --compute-scale {}".format(num_compute)
240     cmd += ' --control-flavor control --compute-flavor compute'
241     if net_data:
242         cmd += ' --networks-file network_data.yaml'
243     libvirt_type = 'kvm'
244     if virtual:
245         with open('/sys/module/kvm_intel/parameters/nested') as f:
246             nested_kvm = f.read().strip()
247             if nested_kvm != 'Y':
248                 libvirt_type = 'qemu'
249     cmd += ' --libvirt-type {}'.format(libvirt_type)
250     logging.info("Deploy command set: {}".format(cmd))
251
252     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
253         fh.write(cmd)
254     return cmd
255
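# Illustrative sketch of how the deploy command string is shaped: a base
# "openstack overcloud deploy" invocation with one "-e" argument per env
# file, plus scale and flavor flags.  The env files and counts below are
# hypothetical; create_deploy_cmd() derives the real ones from the deploy
# settings, network settings and inventory.
def _example_deploy_cmd_shape():
    deploy_options = ['network-environment.yaml', 'opnfv-environment.yaml',
                      'virtual-environment.yaml']
    cmd = "openstack overcloud deploy --templates --timeout {}".format(
        con.DEPLOY_TIMEOUT)
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --control-scale 1 --compute-scale 1"
    cmd += " --control-flavor control --compute-flavor compute"
    return cmd
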
256
257 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
258                patches=None):
259     """
260     Locates sdn image and preps for deployment.
261     :param ds: deploy settings
262     :param ns: network settings
263     :param img: sdn image
264     :param tmp_dir: dir to store modified sdn image
265     :param root_pw: password to configure for overcloud image
266     :param docker_tag: Docker image tag for RDO version (default None)
267     :param patches: List of patches to apply to overcloud image
268     :return: None
269     """
270     # TODO(trozet): Come up with a better way to organize this logic in this
271     # function
272     logging.info("Preparing image: {} for deployment".format(img))
273     if not os.path.isfile(img):
274         logging.error("Missing SDN image {}".format(img))
275         raise ApexDeployException("Missing SDN image file: {}".format(img))
276
277     ds_opts = ds['deploy_options']
278     virt_cmds = list()
279     sdn = ds_opts['sdn_controller']
280     patched_containers = set()
281     # we need this due to rhbz #1436021
282     # fixed in systemd-219-37.el7
283     if sdn is not False:
284         logging.info("Disabling neutron openvswitch-agent")
285         virt_cmds.extend([{
286             con.VIRT_RUN_CMD:
287                 "rm -f /etc/systemd/system/multi-user.target.wants/"
288                 "neutron-openvswitch-agent.service"},
289             {
290             con.VIRT_RUN_CMD:
291                 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
292                 ".service"
293         }])
294
295     if ns.get('http_proxy', ''):
296         virt_cmds.append({
297             con.VIRT_RUN_CMD:
298                 "echo 'http_proxy={}' >> /etc/environment".format(
299                     ns['http_proxy'])})
300
301     if ns.get('https_proxy', ''):
302         virt_cmds.append({
303             con.VIRT_RUN_CMD:
304                 "echo 'https_proxy={}' >> /etc/environment".format(
305                     ns['https_proxy'])})
306
307     if ds_opts['vpn']:
308         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
309         virt_cmds.append({
310             con.VIRT_RUN_CMD:
311                 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
312                 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
313         virt_cmds.append({
314             con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
315                               "zrpcd_start.sh"})
316         virt_cmds.append({
317             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
318                               "init.d/zrpcd_start.sh' /etc/rc.local "})
319         virt_cmds.append({
320             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
321                               "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
322         logging.info("ZRPCD startup script added to rc.local")
323
324     dataplane = ds_opts['dataplane']
325     if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
326         logging.info("Enabling kernel modules for dpdk")
327         # file to module mapping
328         uio_types = {
329             os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
330             os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
331         }
332         for mod_file, mod in uio_types.items():
333             with open(mod_file, 'w') as fh:
334                 fh.write('#!/bin/bash\n')
335                 fh.write('exec /sbin/modprobe {}'.format(mod))
336                 fh.close()
337
338             virt_cmds.extend([
339                 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
340                     mod_file)},
341                 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
342                                    "{}".format(os.path.basename(mod_file))}
343             ])
344     if root_pw:
345         pw_op = "password:{}".format(root_pw)
346         virt_cmds.append({con.VIRT_PW: pw_op})
347     if ds_opts['sfc'] and dataplane == 'ovs':
348         virt_cmds.extend([
349             {con.VIRT_RUN_CMD: "yum -y install "
350                                "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
351                                "{}".format(OVS_NSH_KMOD_RPM)},
352             {con.VIRT_RUN_CMD: "yum downgrade -y "
353                                "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
354                                "{}".format(OVS_NSH_RPM)}
355         ])
356     if dataplane == 'fdio':
357         # Patch neutron to use the OVS external interface for routers
358         # and add the generic Linux NS interface driver
359         virt_cmds.append(
360             {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
361                                "-p1 < neutron-patch-NSDriver.patch"})
362         if sdn is False:
363             virt_cmds.extend([
364                 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
365                 {con.VIRT_RUN_CMD: "yum install -y "
366                                    "/root/nosdn_vpp_rpms/*.rpm"}
367             ])
368
369     tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
370     shutil.copyfile(img, tmp_oc_image)
371     logging.debug("Temporary overcloud image stored as: {}".format(
372         tmp_oc_image))
373
374     undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
375         'installer_vm']['ip']
376     if sdn == 'opendaylight':
377         oc_builder.inject_opendaylight(
378             odl_version=ds_opts['odl_version'],
379             image=tmp_oc_image,
380             tmp_dir=tmp_dir,
381             uc_ip=undercloud_admin_ip,
382             os_version=ds_opts['os_version'],
383             docker_tag=docker_tag,
384         )
385         if docker_tag:
386             patched_containers = patched_containers.union({'opendaylight'})
387
388     if patches:
389         if ds_opts['os_version'] == 'master':
390             branch = ds_opts['os_version']
391         else:
392             branch = "stable/{}".format(ds_opts['os_version'])
393         logging.info('Adding patches to overcloud')
394         patched_containers = patched_containers.union(
395             c_builder.add_upstream_patches(patches,
396                                            tmp_oc_image, tmp_dir,
397                                            branch,
398                                            uc_ip=undercloud_admin_ip,
399                                            docker_tag=docker_tag))
400     # if deploying containers with Ceph and no dedicated Ceph device, use
401     # a persistent loop device for the Ceph OSDs
402     if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
403         tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
404         with open(tmp_losetup, 'w') as fh:
405             fh.write(LOSETUP_SERVICE)
406         virt_cmds.extend([
407             {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
408              },
409             {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
410             {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
411             {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
412         ])
413     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
414     logging.info("Overcloud image customization complete")
415     return patched_containers
416
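# Illustrative usage sketch (hypothetical paths, password and tag):
# prep_image() copies the base image into tmp_dir, applies the virt-customize
# operations built above and returns the set of container images patched.
def _example_prep_image(ds, ns):
    return prep_image(ds, ns, '/var/opt/opnfv/images/overcloud-full.qcow2',
                      '/tmp/apex-tmp', root_pw='changeme', docker_tag='latest')
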
417
418 def make_ssh_key():
419     """
420     Creates a private/public SSH key pair using a 1024-bit RSA key
421     :return: private, public key
422     """
423     key = rsa.generate_private_key(
424         backend=crypto_default_backend(),
425         public_exponent=65537,
426         key_size=1024
427     )
428
429     private_key = key.private_bytes(
430         crypto_serialization.Encoding.PEM,
431         crypto_serialization.PrivateFormat.PKCS8,
432         crypto_serialization.NoEncryption())
433     public_key = key.public_key().public_bytes(
434         crypto_serialization.Encoding.OpenSSH,
435         crypto_serialization.PublicFormat.OpenSSH
436     )
437     return private_key.decode('utf-8'), public_key.decode('utf-8')
438
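# Quick usage sketch: the pair comes back as text -- a PEM-encoded PKCS#8
# private key and a single-line OpenSSH public key.
def _example_make_ssh_key():
    private_key, public_key = make_ssh_key()
    assert private_key.startswith('-----BEGIN PRIVATE KEY-----')
    assert public_key.startswith('ssh-rsa ')
    return private_key, public_key
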
439
440 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
441     """
442     Creates modified opnfv/network environments for deployment
443     :param ds: deploy settings
444     :param ns: network settings
445     :param inv: node inventory
446     :param opnfv_env: file path for opnfv-environment file
447     :param net_env: file path for network-environment file
448     :param tmp_dir: Apex tmp dir
449     :return:
450     """
451
452     logging.info("Preparing opnfv-environment and network-environment files")
453     ds_opts = ds['deploy_options']
454     tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
455     shutil.copyfile(opnfv_env, tmp_opnfv_env)
456     tenant_nic_map = ns['networks']['tenant']['nic_mapping']
457     tenant_nic = dict()
458     tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
459     tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
460     external_nic_map = ns['networks']['external'][0]['nic_mapping']
461     external_nic = dict()
462     external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
463
464     # SSH keys
465     private_key, public_key = make_ssh_key()
466
467     # Make easier/faster variables to index in the file editor
468     if 'performance' in ds_opts:
469         perf = True
470         # vpp
471         if 'vpp' in ds_opts['performance']['Compute']:
472             perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
473         else:
474             perf_vpp_comp = None
475         if 'vpp' in ds_opts['performance']['Controller']:
476             perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
477         else:
478             perf_vpp_ctrl = None
479
480         # ovs
481         if 'ovs' in ds_opts['performance']['Compute']:
482             perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
483         else:
484             perf_ovs_comp = None
485
486         # kernel
487         if 'kernel' in ds_opts['performance']['Compute']:
488             perf_kern_comp = ds_opts['performance']['Compute']['kernel']
489         else:
490             perf_kern_comp = None
491     else:
492         perf = False
493
494     # Modify OPNFV environment
495     # TODO: Change to build a dict and outputting yaml rather than parsing
496     for line in fileinput.input(tmp_opnfv_env, inplace=True):
497         line = line.strip('\n')
498         output_line = line
499         if 'CloudDomain' in line:
500             output_line = "  CloudDomain: {}".format(ns['domain_name'])
501         elif 'replace_private_key' in line:
502             output_line = "    private_key: |\n"
503             key_out = ''
504             for key_line in private_key.splitlines():
505                 key_out += "      {}\n".format(key_line)
506             output_line += key_out
507         elif 'replace_public_key' in line:
508             output_line = "    public_key: '{}'".format(public_key)
509         elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
510                 'resource_registry' in line:
511             output_line = "resource_registry:\n" \
512                           "  OS::TripleO::NodeUserData: first-boot.yaml"
513         elif 'ComputeExtraConfigPre' in line and \
514                 ds_opts['dataplane'] == 'ovs_dpdk':
515             output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
516                           './ovs-dpdk-preconfig.yaml'
517
518         if ds_opts['sdn_controller'] == 'opendaylight' and \
519                 'odl_vpp_routing_node' in ds_opts:
520             if 'opendaylight::vpp_routing_node' in line:
521                 output_line = ("    opendaylight::vpp_routing_node: {}.{}"
522                                .format(ds_opts['odl_vpp_routing_node'],
523                                        ns['domain_name']))
524         elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
525             if 'NeutronVPPAgentPhysnets' in line:
526                 # VPP interface tap0 will be used for external network
527                 # connectivity.
528                 output_line = ("  NeutronVPPAgentPhysnets: "
529                                "'datacentre:{},external:tap0'"
530                                .format(tenant_nic['Controller']))
531         elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
532                 'dvr') is True:
533             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
534                 output_line = ''
535             elif 'NeutronDhcpAgentsPerNetwork' in line:
536                 num_control, num_compute = inv.get_node_counts()
537                 output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
538                                .format(num_compute))
539             elif 'ComputeServices' in line:
540                 output_line = ("  ComputeServices:\n"
541                                "    - OS::TripleO::Services::NeutronDhcpAgent")
542         # SRIOV networks are VLAN based provider networks. In order to simplify
543         # the deployment, nfv_sriov will be the default physnet. VLANs are not
544         # needed in advance, and the user will have to create the network
545         # specifying the segmentation-id.
546         if ds_opts['sriov']:
547             if 'NeutronNetworkVLANRanges' in line:
548                 output_line = ("{},nfv_sriov'".format(line[:-1]))
549
550         if perf:
551             for role in 'NovaCompute', 'Controller':
552                 if role == 'NovaCompute':
553                     perf_opts = perf_vpp_comp
554                 else:
555                     perf_opts = perf_vpp_ctrl
556                 cfg = "{}ExtraConfig".format(role)
557                 if cfg in line and perf_opts:
558                     perf_line = ''
559                     if 'main-core' in perf_opts:
560                         perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
561                                       .format(perf_opts['main-core']))
562                     if 'corelist-workers' in perf_opts:
563                         perf_line += ("\n    "
564                                       "fdio::vpp_cpu_corelist_workers: '{}'"
565                                       .format(perf_opts['corelist-workers']))
566                     if ds_opts['sdn_controller'] == 'opendaylight' and \
567                             ds_opts['dataplane'] == 'fdio':
568                         if role == 'NovaCompute':
569                             perf_line += ("\n    "
570                                           "tripleo::profile::base::neutron::"
571                                           "agents::honeycomb::"
572                                           "interface_role_mapping:"
573                                           " ['{}:tenant-interface',"
574                                           "'{}:public-interface']"
575                                           .format(tenant_nic[role],
576                                                   external_nic[role]))
577                         else:
578                             perf_line += ("\n    "
579                                           "tripleo::profile::base::neutron::"
580                                           "agents::honeycomb::"
581                                           "interface_role_mapping:"
582                                           " ['{}:tenant-interface']"
583                                           .format(tenant_nic[role]))
584                     if perf_line:
585                         output_line = ("  {}:{}".format(cfg, perf_line))
586
587             if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
588                 for k, v in OVS_PERF_MAP.items():
589                     if k in line and v in perf_ovs_comp:
590                         output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])
591
592             # kernel args
593             # (FIXME) use compute's kernel settings for all nodes for now.
594             if perf_kern_comp:
595                 if 'NovaSchedulerDefaultFilters' in line:
596                     output_line = \
597                         "  NovaSchedulerDefaultFilters: 'RamFilter," \
598                         "ComputeFilter,AvailabilityZoneFilter," \
599                         "ComputeCapabilitiesFilter," \
600                         "ImagePropertiesFilter,NUMATopologyFilter'"
601                 elif 'ComputeKernelArgs' in line:
602                     kernel_args = ''
603                     for k, v in perf_kern_comp.items():
604                         kernel_args += "{}={} ".format(k, v)
605                     if kernel_args:
606                         output_line = "  ComputeKernelArgs: '{}'".\
607                             format(kernel_args)
608
609         print(output_line)
610
611     logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
612
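# Illustrative sketch (hypothetical core lists): the "<Role>ExtraConfig" line
# rewritten in prep_env() above ends up carrying the fdio tuning, e.g.:
def _example_compute_extra_config_line():
    perf_opts = {'main-core': '1', 'corelist-workers': '2-3'}
    perf_line = "\n    fdio::vpp_cpu_main_core: '{}'".format(
        perf_opts['main-core'])
    perf_line += "\n    fdio::vpp_cpu_corelist_workers: '{}'".format(
        perf_opts['corelist-workers'])
    return "  {}:{}".format('NovaComputeExtraConfig', perf_line)
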
613
614 def generate_ceph_key():
615     key = os.urandom(16)
616     header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
617     return base64.b64encode(header + key)
618
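# Illustrative sketch: generate_ceph_key() returns a cephx-style secret -- a
# packed header (version, timestamp, zero, key length) plus 16 random bytes,
# base64 encoded.  Decoded to str it drops straight into the
# CephMonKey/CephAdminKey/CephClientKey parameters below.
def _example_generate_ceph_key():
    return generate_ceph_key().decode('utf-8')  # e.g. an 'AQ...' style string
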
619
620 def prep_storage_env(ds, ns, virtual, tmp_dir):
621     """
622     Creates the storage environment file for deployment.  The source file is
623     copied to the host by the undercloud playbook.
624     :param ds:
625     :param ns:
626     :param virtual:
627     :param tmp_dir:
628     :return:
629     """
630     ds_opts = ds['deploy_options']
631     storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
632     if not os.path.isfile(storage_file):
633         logging.error("storage-environment file is not in tmp directory: {}. "
634                       "Check if file was copied from "
635                       "undercloud".format(tmp_dir))
636         raise ApexDeployException("storage-environment file not copied from "
637                                   "undercloud")
638     for line in fileinput.input(storage_file, inplace=True):
639         line = line.strip('\n')
640         if 'CephClusterFSID' in line:
641             print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
642         elif 'CephMonKey' in line:
643             print("  CephMonKey: {}".format(generate_ceph_key().decode(
644                 'utf-8')))
645         elif 'CephAdminKey' in line:
646             print("  CephAdminKey: {}".format(generate_ceph_key().decode(
647                 'utf-8')))
648         elif 'CephClientKey' in line:
649             print("  CephClientKey: {}".format(generate_ceph_key().decode(
650                 'utf-8')))
651         else:
652             print(line)
653
654     if ds_opts['containers']:
655         undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
656             'installer_vm']['ip']
657         ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
658         docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
659                        "{}-centos-7".format(undercloud_admin_ip,
660                                             ceph_version)
661         ceph_params = {
662             'DockerCephDaemonImage': docker_image,
663         }
664
665         # The maximum number of PGs allowed is calculated as num_mons * 200,
666         # so set the PG count and pool size so that the total
667         # (num_pgs * num_pools * num_osds) stays below that limit.
668         ceph_params['CephPoolDefaultSize'] = 2
669         ceph_params['CephPoolDefaultPgNum'] = 32
670         if virtual:
671             ceph_params['CephAnsibleExtraConfig'] = {
672                 'centos_package_dependencies': [],
673                 'ceph_osd_docker_memory_limit': '1g',
674                 'ceph_mds_docker_memory_limit': '1g',
675             }
676         ceph_device = ds_opts['ceph_device']
677         ceph_params['CephAnsibleDisksConfig'] = {
678             'devices': [ceph_device],
679             'journal_size': 512,
680             'osd_scenario': 'collocated'
681         }
682         utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
683     # TODO(trozet): remove following block as we only support containers now
684     elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
685         with open(storage_file, 'a') as fh:
686             fh.write('  ExtraConfig:\n')
687             fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
688                 ds_opts['ceph_device']
689             ))
690
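# Illustrative sketch (hypothetical undercloud IP and Ceph version) of the
# parameter_defaults block that utils.edit_tht_env() merges into
# storage-environment.yaml for a virtual, containerized deploy using the
# /dev/loop3 device prepared in prep_image().
def _example_storage_params():
    return {
        'DockerCephDaemonImage':
            '192.0.2.1:8787/ceph/daemon:tag-build-master-luminous-centos-7',
        'CephPoolDefaultSize': 2,
        'CephPoolDefaultPgNum': 32,
        'CephAnsibleExtraConfig': {
            'centos_package_dependencies': [],
            'ceph_osd_docker_memory_limit': '1g',
            'ceph_mds_docker_memory_limit': '1g',
        },
        'CephAnsibleDisksConfig': {
            'devices': ['/dev/loop3'],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        },
    }
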
691
692 def prep_sriov_env(ds, tmp_dir):
693     """
694     Creates the SRIOV environment file for deployment.  The source file is
695     copied to the host by the undercloud playbook.
696     :param ds:
697     :param tmp_dir:
698     :return:
699     """
700     ds_opts = ds['deploy_options']
701     sriov_iface = ds_opts['sriov']
702     sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
703     if not os.path.isfile(sriov_file):
704         logging.error("sriov-environment file is not in tmp directory: {}. "
705                       "Check if file was copied from "
706                       "undercloud".format(tmp_dir))
707         raise ApexDeployException("sriov-environment file not copied from "
708                                   "undercloud")
709     # TODO(rnoriega): Instead of line editing, refactor this code to load
710     # yaml file into a dict, edit it and write the file back.
711     for line in fileinput.input(sriov_file, inplace=True):
712         line = line.strip('\n')
713         if 'NovaSchedulerDefaultFilters' in line:
714             print("  {}".format(line[3:]))
715         elif 'NovaSchedulerAvailableFilters' in line:
716             print("  {}".format(line[3:]))
717         elif 'NeutronPhysicalDevMappings' in line:
718             print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
719                   .format(sriov_iface))
720         elif 'NeutronSriovNumVFs' in line:
721             print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
722         elif 'NovaPCIPassthrough' in line:
723             print("  NovaPCIPassthrough:")
724         elif 'devname' in line:
725             print("    - devname: \"{}\"".format(sriov_iface))
726         elif 'physical_network' in line:
727             print("      physical_network: \"nfv_sriov\"")
728         else:
729             print(line)
730
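# Illustrative sketch (hypothetical interface name): the line edits above
# leave the SRIOV env file carrying values equivalent to this mapping.
def _example_sriov_yaml_values(sriov_iface='ens5f0'):
    return {
        'NeutronPhysicalDevMappings': "nfv_sriov:{}".format(sriov_iface),
        'NeutronSriovNumVFs': "{}:8".format(sriov_iface),
        'NovaPCIPassthrough': [{'devname': sriov_iface,
                                'physical_network': 'nfv_sriov'}],
    }
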
731
732 def external_network_cmds(ns, ds):
733     """
734     Generates external network openstack commands
735     :param ns: network settings
736     :param ds: deploy settings
737     :return: list of commands to configure external network
738     """
739     ds_opts = ds['deploy_options']
740     external_physnet = 'datacentre'
741     if ds_opts['dataplane'] == 'fdio' and \
742        ds_opts['sdn_controller'] != 'opendaylight':
743         external_physnet = 'external'
744     if 'external' in ns.enabled_network_list:
745         net_config = ns['networks']['external'][0]
746         external = True
747         pool_start, pool_end = net_config['floating_ip_range']
748     else:
749         net_config = ns['networks']['admin']
750         external = False
751         pool_start, pool_end = ns['apex']['networks']['admin'][
752             'introspection_range']
753     nic_config = net_config['nic_mapping']
754     gateway = net_config['gateway']
755     cmds = list()
756     # create network command
757     if nic_config['compute']['vlan'] == 'native':
758         ext_type = 'flat'
759     else:
760         ext_type = "vlan --provider-segment {}".format(nic_config[
761                                                        'compute']['vlan'])
762     cmds.append("openstack network create external --project service "
763                 "--external --provider-network-type {} "
764                 "--provider-physical-network {}"
765                 .format(ext_type, external_physnet))
766     # create subnet command
767     cidr = net_config['cidr']
768     subnet_cmd = "openstack subnet create external-subnet --project " \
769                  "service --network external --no-dhcp --gateway {} " \
770                  "--allocation-pool start={},end={} --subnet-range " \
771                  "{}".format(gateway, pool_start, pool_end, str(cidr))
772     if external and cidr.version == 6:
773         subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
774                       '--ipv6-address-mode slaac'
775     cmds.append(subnet_cmd)
776     logging.debug("Neutron external network commands determined "
777                   "as: {}".format(cmds))
778     return cmds
779
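# Illustrative sketch (hypothetical addresses): for a native-VLAN external
# network the commands returned above look roughly like this.
def _example_external_network_cmds():
    return [
        "openstack network create external --project service --external "
        "--provider-network-type flat --provider-physical-network datacentre",
        "openstack subnet create external-subnet --project service "
        "--network external --no-dhcp --gateway 192.0.2.1 "
        "--allocation-pool start=192.0.2.100,end=192.0.2.200 "
        "--subnet-range 192.0.2.0/24"
    ]
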
780
781 def create_congress_cmds(overcloud_file):
782     drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
783     overcloudrc = parsers.parse_overcloudrc(overcloud_file)
784     logging.info("Creating congress commands")
785     try:
786         ds_cfg = [
787             "username={}".format(overcloudrc['OS_USERNAME']),
788             "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
789             "password={}".format(overcloudrc['OS_PASSWORD']),
790             "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
791         ]
792     except KeyError:
793         logging.error("Unable to find all keys required for congress in "
794                       "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
795                       "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
796                       "file: {}".format(overcloud_file))
797         raise
798     cmds = list()
799     ds_cfg = '--config ' + ' --config '.join(ds_cfg)
800
801     for driver in drivers:
802         if driver == 'doctor':
803             cmd = "{} \"{}\"".format(driver, driver)
804         else:
805             cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
806         if driver == 'nova':
807             cmd += ' --config api_version="2.34"'
808         logging.debug("Congress command created: {}".format(cmd))
809         cmds.append(cmd)
810     return cmds
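
# Illustrative sketch (hypothetical credentials): each returned entry pairs a
# congress datasource name with its driver and the shared --config options,
# e.g. for the nova driver:
def _example_congress_cmd():
    ds_cfg = ('--config username=admin --config tenant_name=admin '
              '--config password=secret '
              '--config auth_url=http://192.0.2.10:5000/v3')
    return 'nova "nova" {} --config api_version="2.34"'.format(ds_cfg)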