Fix nosdn fdio scenario
[apex.git] / apex / overcloud / deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import shutil
import uuid
import struct
import time
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the SDN controller
    is matched, and then the function looks for enabled features of that
    controller to determine which environment files should be used.  By
    default, a feature is added to the list if it is set to True in the
    deploy settings.  If a feature does not have a boolean value, the map
    entry is treated as a (key, value) tuple and the deploy setting for that
    key is compared against the value.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list

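# Illustrative traversal of SDN_FILE_MAP by build_sdn_env_list() above (not
# executed here; the deploy settings dict is a minimal, hypothetical example
# and THT_ENV_DIR is abbreviated):
#
#   build_sdn_env_list({'sdn_controller': 'opendaylight', 'sfc': True},
#                      SDN_FILE_MAP)
#   # -> ['<THT_ENV_DIR>/neutron-opendaylight.yaml',
#   #     '<THT_ENV_DIR>/neutron-sfc-opendaylight.yaml']
#
# The controller's 'default' file is appended first, then one file is added
# per feature key that is enabled in the deploy settings.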

def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(con.THT_DOCKER_ENV_DIR,
                                    docker_services[sdn_base])
            else:
                return os.path.join(con.THT_DOCKER_ENV_DIR, sdn_base)

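# Illustrative only: for containerized deploys this swaps the basename of the
# first matching SDN env file for its docker counterpart under
# THT_DOCKER_ENV_DIR, using whatever mapping con.VALID_DOCKER_SERVICES
# provides (a None mapping keeps the same basename).  For example, assuming
# the OpenDaylight file maps to itself:
#
#   get_docker_sdn_file({'sdn_controller': 'opendaylight'})
#   # -> '<THT_DOCKER_ENV_DIR>/neutron-opendaylight.yaml'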

def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # It is not clear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS.  Either way it will probably improve over time as the
        # aarch64 support matures in CentOS, and deploy time should be
        # re-tested in the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd

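# For reference, a command assembled by create_deploy_cmd() above looks
# roughly like the following (illustrative only and wrapped for readability;
# the -e files, timeout, NTP server and scale counts all depend on the
# deploy/network settings and are hypothetical here):
#
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml \
#     -e opnfv-environment.yaml \
#     -e <THT_ENV_DIR>/neutron-opendaylight.yaml \
#     -e virtual-environment.yaml \
#     --ntp-server 0.pool.ntp.org \
#     --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute \
#     --libvirt-type kvm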

def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None, upstream=False):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :param upstream: (boolean) Indicates if upstream deployment or not
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add a generic Linux namespace interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    # TODO (trozet): remove this if block after Fraser
    if sdn == 'opendaylight' and not upstream:
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
                and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])
    elif sdn == 'opendaylight':
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if using containers with Ceph and no Ceph device is specified, we need
    # to use a persistent loop device for the Ceph OSDs
    if docker_tag and not ds_opts.get('ceph_device', None):
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
            {con.VIRT_RUN_CMD: 'mkfs.ext4 -F /srv/data.img'},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
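
# The keys returned by make_ssh_key() are plain text so prep_env() below can
# paste them straight into the opnfv-environment file.  A minimal sanity
# check (illustrative):
#
#   priv, pub = make_ssh_key()
#   assert priv.startswith('-----BEGIN PRIVATE KEY-----')
#   assert pub.startswith('ssh-rsa ')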


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to building a dict and outputting YAML rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")
        # SRIOV networks are VLAN based provider networks. In order to simplify
        # the deployment, nfv_sriov will be the default physnet. VLANs are not
        # needed in advance, and the user will have to create the network
        # specifying the segmentation-id.
        if ds_opts['sriov']:
            if 'NeutronNetworkVLANRanges' in line:
                output_line = ("{},nfv_sriov'".format(line[:-1]))

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))

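# Illustrative example of the performance handling in prep_env() above for an
# OVS/DPDK deploy (the 'performance' values below are hypothetical):
#
#   deploy_options:
#     performance:
#       Compute:
#         ovs:
#           pmd_cores: '2,3'
#           socket_memory: '1024'
#
# would rewrite the matching opnfv-environment lines via OVS_PERF_MAP to
# roughly:
#
#   NeutronDpdkCoreList: '2,3'
#   NeutronDpdkSocketMemory: '1024'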

def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)

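# The header packed by generate_ceph_key() above follows the cephx key
# layout: a 2-byte type field, a 4-byte creation time (seconds), a 4-byte
# nanoseconds field (zeroed here) and a 2-byte key length, all little-endian,
# followed by the 16-byte secret.  A hypothetical round-trip check:
#
#   raw = base64.b64decode(generate_ceph_key())
#   assert struct.unpack('<hiih', raw[:12])[3] == 16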

def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
                       "{}-centos-7".format(undercloud_admin_ip,
                                            ceph_version)
        ceph_params = {
            'DockerCephDaemonImage': docker_image,
        }
        if not ds['global_params']['ha_enabled']:
            ceph_params['CephPoolDefaultSize'] = 1

        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
            ceph_params['CephPoolDefaultPgNum'] = 32
        if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
            ceph_device = ds_opts['ceph_device']
        else:
            # TODO(trozet): make this DS default after Fraser
            ceph_device = '/dev/loop3'

        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))

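# For a virtual, non-HA containerized deploy the ceph_params assembled by
# prep_storage_env() above amount to roughly the following parameter_defaults
# merged into storage-environment.yaml (the undercloud IP and Ceph release in
# the image tag are hypothetical):
#
#   parameter_defaults:
#     DockerCephDaemonImage: 192.0.2.1:8787/ceph/daemon:tag-build-master-luminous-centos-7
#     CephPoolDefaultSize: 1
#     CephPoolDefaultPgNum: 32
#     CephAnsibleExtraConfig:
#       centos_package_dependencies: []
#       ceph_osd_docker_memory_limit: 1g
#       ceph_mds_docker_memory_limit: 1g
#     CephAnsibleDisksConfig:
#       devices:
#         - /dev/loop3
#       journal_size: 512
#       osd_scenario: collocated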

def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)

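# Illustrative result of the prep_sriov_env() rewrite above for
# sriov: 'ens5' in the deploy settings (the interface name is hypothetical):
#
#   NeutronPhysicalDevMappings: "nfv_sriov:ens5"
#   NeutronSriovNumVFs: "ens5:8"
#   NovaPCIPassthrough:
#     - devname: "ens5"
#       physical_network: "nfv_sriov"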

def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds

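# Example of the commands generated by external_network_cmds() above for a
# flat external network (illustrative only and wrapped for readability; the
# gateway, allocation pool and CIDR values are hypothetical):
#
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service \
#     --network external --no-dhcp --gateway 172.30.9.1 \
#     --allocation-pool start=172.30.9.50,end=172.30.9.99 \
#     --subnet-range 172.30.9.0/24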

def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
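

# Example of a single datasource command produced by create_congress_cmds()
# above (illustrative only and wrapped for readability; the credential values
# come from the parsed overcloudrc and are fake here):
#
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=secret --config auth_url=http://192.0.2.1:5000/v3 \
#     --config api_version="2.34"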