Merge "Add support for kubernetes deployment"
[apex.git] / apex / overcloud / deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import shutil
import uuid
import struct
import time
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}
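# Top-level keys are 'sdn_controller' values (False covers no-SDN deploys),
# nested keys are feature names from the deploy settings, and leaf values
# are the tripleo-heat-templates environment files resolved by
# build_sdn_env_list() below.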

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}
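# Maps tripleo-heat-templates OVS-DPDK parameter names to the corresponding
# keys under performance -> Compute -> ovs in the deploy settings; consumed
# by prep_env() when rendering the opnfv-environment file.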

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""
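# This unit is uploaded into the overcloud image by prep_image() and enabled
# when the deploy settings use /dev/loop3 as the Ceph device, so the
# loop-backed OSD device is recreated on every boot.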


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches the sdn_map.  First the SDN controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used.  By
    default, a feature is added to the list when it is set to True in the
    deploy settings.  If a feature's value is not a boolean, it is compared
    against the map entry as a (key, value) tuple.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
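    # Illustrative example: for deploy settings with sdn_controller set to
    # 'opendaylight' and 'sfc' set to True, this returns
    # [<THT_ENV_DIR>/neutron-opendaylight.yaml,
    #  <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml].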
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(tht_dir,
                                    docker_services[sdn_base])
            else:
                return os.path.join(tht_dir, sdn_base)


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be re-tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
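# Illustrative example of a generated command (values depend on deploy
# settings; an HA virtual deploy without containers would look roughly like):
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <THT_ENV_DIR>/puppet-pacemaker.yaml \
#     -e opnfv-environment.yaml -e <THT_ENV_DIR>/neutron-opendaylight.yaml \
#     -e virtual-environment.yaml --ntp-server <ntp> --control-scale 3 \
#     --compute-scale 2 --control-flavor control --compute-flavor compute \
#     --libvirt-type kvm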


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of patched container names
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start at boot")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic Linux namespace interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    # The undercloud admin IP is needed both for ODL injection and for
    # applying upstream patches below
    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # When deploying containers with Ceph and no dedicated Ceph device, a
    # persistent loop device is needed for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a public/private ssh key pair using a 1024-bit RSA key
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
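# The returned strings are a PEM/PKCS8-encoded private key and an
# OpenSSH-format public key; prep_env() below injects them into the
# opnfv-environment file in place of the replace_private_key and
# replace_public_key markers.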


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Convenience variables for the file edits below
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to building a dict and outputting YAML rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")
        # SRIOV networks are VLAN-based provider networks. In order to
        # simplify the deployment, nfv_sriov will be the default physnet.
        # VLANs are not needed in advance, and the user will have to create
        # the network specifying the segmentation-id.
        if ds_opts['sriov']:
            if 'NeutronNetworkVLANRanges' in line:
                output_line = ("{},nfv_sriov'".format(line[:-1]))

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


def generate_ceph_key():
    key = os.urandom(16)
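    # Assumed cephx secret layout: little-endian key type (1 = AES),
    # creation time (seconds, then zeroed nanoseconds) and key length,
    # followed by the 16 random key bytes; the whole blob is base64-encoded
    # so prep_storage_env() can drop it into the storage environment file.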
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
                       "{}-centos-7".format(undercloud_admin_ip,
                                            ceph_version)
        ceph_params = {
            'DockerCephDaemonImage': docker_image,
        }

        # Max PGs allowed is calculated as num_mons * 200, so set the number
        # of PGs and the pool size such that
        # num_pgs * num_pools * num_osds stays below that total
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


696
697 def prep_sriov_env(ds, tmp_dir):
698     """
699     Creates SRIOV environment file for deployment. Source file is copied by
700     undercloud playbook to host.
701     :param ds:
702     :param tmp_dir:
703     :return:
704     """
705     ds_opts = ds['deploy_options']
706     sriov_iface = ds_opts['sriov']
707     sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
708     if not os.path.isfile(sriov_file):
709         logging.error("sriov-environment file is not in tmp directory: {}. "
710                       "Check if file was copied from "
711                       "undercloud".format(tmp_dir))
712         raise ApexDeployException("sriov-environment file not copied from "
713                                   "undercloud")
714     # TODO(rnoriega): Instead of line editing, refactor this code to load
715     # yaml file into a dict, edit it and write the file back.
716     for line in fileinput.input(sriov_file, inplace=True):
717         line = line.strip('\n')
718         if 'NovaSchedulerDefaultFilters' in line:
719             print("  {}".format(line[3:]))
720         elif 'NovaSchedulerAvailableFilters' in line:
721             print("  {}".format(line[3:]))
722         elif 'NeutronPhysicalDevMappings' in line:
723             print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
724                   .format(sriov_iface))
725         elif 'NeutronSriovNumVFs' in line:
726             print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
727         elif 'NovaPCIPassthrough' in line:
728             print("  NovaPCIPassthrough:")
729         elif 'devname' in line:
730             print("    - devname: \"{}\"".format(sriov_iface))
731         elif 'physical_network' in line:
732             print("      physical_network: \"nfv_sriov\"")
733         else:
734             print(line)
735
736
def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
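# Illustrative output (assuming a flat external network on the 'datacentre'
# physnet; addresses are placeholders):
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service \
#     --network external --no-dhcp --gateway 172.30.9.1 \
#     --allocation-pool start=172.30.9.2,end=172.30.9.100 \
#     --subnet-range 172.30.9.0/24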


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
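# Illustrative command for the 'nova' driver (credential values are
# placeholders pulled from overcloudrc):
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=secret --config auth_url=http://192.0.2.1:5000/v3 \
#     --config api_version="2.34"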