Update OVS in overcloud
apex.git: apex/overcloud/deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import platform
15 import shutil
16 import uuid
17 import struct
18 import time
19 import apex.builders.overcloud_builder as oc_builder
20 import apex.builders.common_builder as c_builder
21
22 from apex.common import constants as con
23 from apex.common.exceptions import ApexDeployException
24 from apex.common import parsers
25 from apex.common import utils
26 from apex.virtual import utils as virt_utils
27 from cryptography.hazmat.primitives import serialization as \
28     crypto_serialization
29 from cryptography.hazmat.primitives.asymmetric import rsa
30 from cryptography.hazmat.backends import default_backend as \
31     crypto_default_backend
32
33
34 SDN_FILE_MAP = {
35     'opendaylight': {
36         'sfc': 'neutron-sfc-opendaylight.yaml',
37         'vpn': 'neutron-bgpvpn-opendaylight.yaml',
38         'gluon': 'gluon.yaml',
39         'vpp': {
40             'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
41             'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
42             'default': 'neutron-opendaylight-honeycomb.yaml'
43         },
44         'l2gw': 'neutron-l2gw-opendaylight.yaml',
45         'sriov': 'neutron-opendaylight-sriov.yaml',
46         'default': 'neutron-opendaylight.yaml',
47     },
48     'onos': {
49         'sfc': 'neutron-onos-sfc.yaml',
50         'default': 'neutron-onos.yaml'
51     },
52     'ovn': 'neutron-ml2-ovn.yaml',
53     False: {
54         'vpp': 'neutron-ml2-vpp.yaml',
55         'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
56     }
57 }
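# Editor's note (a sketch of how this map appears to be consumed, not from the
# original file): top-level keys are either an SDN controller name or a feature
# flag from deploy settings.  Nested dicts are walked recursively by
# build_sdn_env_list() below, and a tuple value such as
# ('ovs_dpdk', 'neutron-ovs-dpdk.yaml') means "use this env file when the
# matching deploy setting equals the first element of the tuple".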
58
59 OTHER_FILE_MAP = {
60     'tacker': 'enable_tacker.yaml',
61     'congress': 'enable_congress.yaml',
62     'barometer': 'enable_barometer.yaml',
63     'rt_kvm': 'enable_rt_kvm.yaml'
64 }
65
66 OVS_PERF_MAP = {
67     'HostCpusList': 'dpdk_cores',
68     'NeutronDpdkCoreList': 'pmd_cores',
69     'NeutronDpdkSocketMemory': 'socket_memory',
70     'NeutronDpdkMemoryChannels': 'memory_channels'
71 }
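# Editor's note (assumption based on how prep_env() uses this map below): keys
# are THT parameter names and values are the matching keys under the
# performance -> Compute -> ovs section of deploy settings.  For example, a
# hypothetical deploy setting of
#
#   performance:
#     Compute:
#       ovs:
#         socket_memory: 1024,1024
#
# would be rendered as "NeutronDpdkSocketMemory: '1024,1024'" in the opnfv
# environment file.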
72
73 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
74 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
76                       ".noarch.rpm"
77
78 LOSETUP_SERVICE = """[Unit]
79 Description=Setup loop devices
80 Before=network.target
81
82 [Service]
83 Type=oneshot
84 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
85 ExecStop=/sbin/losetup -d /dev/loop3
86 TimeoutSec=60
87 RemainAfterExit=yes
88
89 [Install]
90 WantedBy=multi-user.target
91 """
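# Editor's note: this unit file is only an in-memory template here.  As a
# sketch of its use (see prep_image() below), when deploying containers with
# ceph_device set to /dev/loop3 the template is written to losetup.service in
# the Apex tmp dir, uploaded into the overcloud image under
# /usr/lib/systemd/system/, and a 10G /srv/data.img backing file is created so
# the loop device is available for the Ceph OSD at boot.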
92
93
94 def build_sdn_env_list(ds, sdn_map, env_list=None):
95     """
96     Builds a list of SDN environment files to be used in the deploy cmd.
97
98     This function recursively searches an sdn_map.  First the SDN controller
99     is matched, then the function looks for enabled features for that
100     controller to determine which environment files should be used.  By
101     default a feature is added to the list when its deploy setting is true.
102     If a feature does not have a boolean value, the setting is compared
103     against a (key, value) tuple instead.
104
105     :param ds: deploy settings
106     :param sdn_map: SDN map to recursively search
107     :param env_list: recursive var to hold previously found env_list
108     :return: A list of env files
109     """
110     if env_list is None:
111         env_list = list()
112     for k, v in sdn_map.items():
113         if ds['sdn_controller'] == k or (k in ds and ds[k]):
114             if isinstance(v, dict):
115                 # Append default SDN env file first
116                 # The assumption is that feature-enabled SDN env files
117                 # override and do not conflict with previously set default
118                 # settings
119                 if ds['sdn_controller'] == k and 'default' in v:
120                     env_list.append(os.path.join(con.THT_ENV_DIR,
121                                                  v['default']))
122                 env_list.extend(build_sdn_env_list(ds, v))
123             # check if the value is not a boolean
124             elif isinstance(v, tuple):
125                 if ds[k] == v[0]:
126                     env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
127             else:
128                 env_list.append(os.path.join(con.THT_ENV_DIR, v))
129     if len(env_list) == 0:
130         try:
131             env_list.append(os.path.join(
132                 con.THT_ENV_DIR, sdn_map['default']))
133         except KeyError:
134             logging.warning("Unable to find default file for SDN")
135
136     return env_list
137
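# Illustrative usage (editor's sketch, not part of the original file): for a
# deploy_options dict with 'sdn_controller': 'opendaylight' and 'sfc': True,
# the call below would return the default ODL env file followed by the SFC
# file, both joined onto con.THT_ENV_DIR:
#
#   env_files = build_sdn_env_list(ds['deploy_options'], SDN_FILE_MAP)
#   # -> [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#   #     <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]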
138
139 def get_docker_sdn_file(ds_opts):
140     """
141     Returns docker env file for detected SDN
142     :param ds_opts: deploy options
143     :return: docker THT env file for an SDN
144     """
145     # FIXME(trozet): We assume right now there is only one docker SDN file
146     docker_services = con.VALID_DOCKER_SERVICES
147     tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
148     sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
149     for sdn_file in sdn_env_list:
150         sdn_base = os.path.basename(sdn_file)
151         if sdn_base in docker_services:
152             if docker_services[sdn_base] is not None:
153                 return os.path.join(tht_dir,
154                                     docker_services[sdn_base])
155             else:
156                 return os.path.join(tht_dir, sdn_base)
157
158
159 def create_deploy_cmd(ds, ns, inv, tmp_dir,
160                       virtual, env_file='opnfv-environment.yaml',
161                       net_data=False):
162
163     logging.info("Creating deployment command")
164     deploy_options = ['network-environment.yaml']
165
166     ds_opts = ds['deploy_options']
167
168     if ds_opts['containers']:
169         deploy_options.append(os.path.join(con.THT_ENV_DIR,
170                                            'docker.yaml'))
171
172     if ds['global_params']['ha_enabled']:
173         if ds_opts['containers']:
174             deploy_options.append(os.path.join(con.THT_ENV_DIR,
175                                                'docker-ha.yaml'))
176         else:
177             deploy_options.append(os.path.join(con.THT_ENV_DIR,
178                                                'puppet-pacemaker.yaml'))
179
180     if env_file:
181         deploy_options.append(env_file)
182
183     if ds_opts['containers']:
184         deploy_options.append('docker-images.yaml')
185         sdn_docker_file = get_docker_sdn_file(ds_opts)
186         if sdn_docker_file:
187             deploy_options.append(sdn_docker_file)
188             deploy_options.append('sdn-images.yaml')
189     else:
190         deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
191
192     for k, v in OTHER_FILE_MAP.items():
193         if k in ds_opts and ds_opts[k]:
194             if ds_opts['containers']:
195                 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
196                                                    "{}.yaml".format(k)))
197             else:
198                 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
199
200     if ds_opts['ceph'] and 'csit' not in env_file:
201         prep_storage_env(ds, ns, virtual, tmp_dir)
202         deploy_options.append(os.path.join(con.THT_ENV_DIR,
203                                            'storage-environment.yaml'))
204     if ds_opts['sriov']:
205         prep_sriov_env(ds, tmp_dir)
206
207     # Check for 'k8s' here intentionally, as we may support other values
208     # such as openstack/openshift for 'vim' option.
209     if ds_opts['vim'] == 'k8s':
210         deploy_options.append('kubernetes-environment.yaml')
211
212     if virtual:
213         deploy_options.append('virtual-environment.yaml')
214     else:
215         deploy_options.append('baremetal-environment.yaml')
216
217     num_control, num_compute = inv.get_node_counts()
218     if num_control == 0 or num_compute == 0:
219         logging.error("Detected 0 control or compute nodes.  Control nodes: "
220                       "{}, compute nodes: {}".format(num_control, num_compute))
221         raise ApexDeployException("Invalid number of control or computes")
222     elif num_control > 1 and not ds['global_params']['ha_enabled']:
223         num_control = 1
224     if platform.machine() == 'aarch64':
225         # aarch64 deploys were not completing in the default 90 mins.
226         # It is unclear whether this is related to the hardware the OOO
227         # support was developed on or to the virtualization support in
228         # CentOS.  Either way it will probably improve over time as aarch64
229         # support matures in CentOS, so deploy time should be re-tested in
230         # the future and this multiplier removed.
231         con.DEPLOY_TIMEOUT *= 2
232     cmd = "openstack overcloud deploy --templates --timeout {} " \
233           .format(con.DEPLOY_TIMEOUT)
234     # build cmd env args
235     for option in deploy_options:
236         cmd += " -e {}".format(option)
237     cmd += " --ntp-server {}".format(ns['ntp'][0])
238     cmd += " --control-scale {}".format(num_control)
239     cmd += " --compute-scale {}".format(num_compute)
240     cmd += ' --control-flavor control --compute-flavor compute'
241     if net_data:
242         cmd += ' --networks-file network_data.yaml'
243     libvirt_type = 'kvm'
244     if virtual:
245         with open('/sys/module/kvm_intel/parameters/nested') as f:
246             nested_kvm = f.read().strip()
247             if nested_kvm != 'Y':
248                 libvirt_type = 'qemu'
249     cmd += ' --libvirt-type {}'.format(libvirt_type)
250     logging.info("Deploy command set: {}".format(cmd))
251
252     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
253         fh.write(cmd)
254     return cmd
255
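# Editor's sketch of the resulting command string (values here are examples
# only; the real ones come from deploy/network settings and the env files
# staged in tmp_dir):
#
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e opnfv-environment.yaml \
#     -e <THT_ENV_DIR>/neutron-opendaylight.yaml ... \
#     --ntp-server pool.ntp.org --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute --libvirt-type qemu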
256
257 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
258                patches=None):
259     """
260     Locates sdn image and preps for deployment.
261     :param ds: deploy settings
262     :param ns: network settings
263     :param img: sdn image
264     :param tmp_dir: dir to store modified sdn image
265     :param root_pw: password to configure for overcloud image
266     :param docker_tag: Docker image tag for RDO version (default None)
267     :param patches: List of patches to apply to overcloud image
268     :return: None
269     """
270     # TODO(trozet): Come up with a better way to organize this logic in this
271     # function
272     logging.info("Preparing image: {} for deployment".format(img))
273     if not os.path.isfile(img):
274         logging.error("Missing SDN image {}".format(img))
275         raise ApexDeployException("Missing SDN image file: {}".format(img))
276
277     ds_opts = ds['deploy_options']
278     virt_cmds = list()
279     sdn = ds_opts['sdn_controller']
280     patched_containers = set()
281     # we need this due to rhbz #1436021
282     # fixed in systemd-219-37.el7
283     if sdn is not False:
284         logging.info("Neutron openvswitch-agent disabled")
285         virt_cmds.extend([{
286             con.VIRT_RUN_CMD:
287                 "rm -f /etc/systemd/system/multi-user.target.wants/"
288                 "neutron-openvswitch-agent.service"},
289             {
290             con.VIRT_RUN_CMD:
291                 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
292                 ".service"
293         }])
294
295     if ns.get('http_proxy', ''):
296         virt_cmds.append({
297             con.VIRT_RUN_CMD:
298                 "echo 'http_proxy={}' >> /etc/environment".format(
299                     ns['http_proxy'])})
300
301     if ns.get('https_proxy', ''):
302         virt_cmds.append({
303             con.VIRT_RUN_CMD:
304                 "echo 'https_proxy={}' >> /etc/environment".format(
305                     ns['https_proxy'])})
306
307     if ds_opts['vpn']:
308         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
309         virt_cmds.append({
310             con.VIRT_RUN_CMD:
311                 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
312                 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
313         virt_cmds.append({
314             con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
315                               "zrpcd_start.sh"})
316         virt_cmds.append({
317             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
318                               "init.d/zrpcd_start.sh' /etc/rc.local "})
319         virt_cmds.append({
320             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
321                               "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
322         logging.info("ZRPCD startup configured in overcloud image")
323
324     dataplane = ds_opts['dataplane']
325     if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
326         logging.info("Enabling kernel modules for dpdk")
327         # file to module mapping
328         uio_types = {
329             os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
330             os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
331         }
332         for mod_file, mod in uio_types.items():
333             with open(mod_file, 'w') as fh:
334                 fh.write('#!/bin/bash\n')
335                 fh.write('exec /sbin/modprobe {}'.format(mod))
337
338             virt_cmds.extend([
339                 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
340                     mod_file)},
341                 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
342                                    "{}".format(os.path.basename(mod_file))}
343             ])
344     if root_pw:
345         pw_op = "password:{}".format(root_pw)
346         virt_cmds.append({con.VIRT_PW: pw_op})
347
348     if dataplane == 'ovs':
349         if ds_opts['sfc']:
350             virt_cmds.extend([
351                 {con.VIRT_RUN_CMD: "yum -y install "
352                                    "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
353                                    "{}".format(OVS_NSH_KMOD_RPM)},
354                 {con.VIRT_RUN_CMD: "yum downgrade -y "
355                                    "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
356                                    "{}".format(OVS_NSH_RPM)}
357             ])
358         elif sdn == 'opendaylight':
359             # FIXME(trozet) remove this after RDO is updated with fix for
360             # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
361             ovs_file = os.path.basename(con.CUSTOM_OVS)
362             ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
363             utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
364                                             targets=[ovs_file])
365             virt_cmds.extend([
366                 {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
367                                                                   ovs_file))},
368                 {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
369                     ovs_file)}
370             ])
371     if dataplane == 'fdio':
372         # Patch neutron to use the OVS external interface for the router
373         # and add a generic Linux network-namespace interface driver
374         virt_cmds.append(
375             {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
376                                "-p1 < neutron-patch-NSDriver.patch"})
377         if sdn is False:
378             virt_cmds.extend([
379                 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
380                 {con.VIRT_RUN_CMD: "yum install -y "
381                                    "/root/nosdn_vpp_rpms/*.rpm"}
382             ])
383
384     tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
385     shutil.copyfile(img, tmp_oc_image)
386     logging.debug("Temporary overcloud image stored as: {}".format(
387         tmp_oc_image))
388
389     undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
390         'installer_vm']['ip']
391     if sdn == 'opendaylight':
392         oc_builder.inject_opendaylight(
393             odl_version=ds_opts['odl_version'],
394             image=tmp_oc_image,
395             tmp_dir=tmp_dir,
396             uc_ip=undercloud_admin_ip,
397             os_version=ds_opts['os_version'],
398             docker_tag=docker_tag,
399         )
400         if docker_tag:
401             patched_containers = patched_containers.union({'opendaylight'})
402
403     if patches:
404         if ds_opts['os_version'] == 'master':
405             branch = ds_opts['os_version']
406         else:
407             branch = "stable/{}".format(ds_opts['os_version'])
408         logging.info('Adding patches to overcloud')
409         patched_containers = patched_containers.union(
410             c_builder.add_upstream_patches(patches,
411                                            tmp_oc_image, tmp_dir,
412                                            branch,
413                                            uc_ip=undercloud_admin_ip,
414                                            docker_tag=docker_tag))
415     # If deploying containers with Ceph and no Ceph device is provided, we
416     # need to use a persistent loop device for the Ceph OSDs
417     if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
418         tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
419         with open(tmp_losetup, 'w') as fh:
420             fh.write(LOSETUP_SERVICE)
421         virt_cmds.extend([
422             {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
423              },
424             {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
425             {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
426             {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
427         ])
428     # TODO(trozet) remove this after LP#173474 is fixed
429     dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
430     virt_cmds.append(
431         {con.VIRT_RUN_CMD: "crudini --del {} Unit "
432                            "ConditionPathExists".format(dhcp_unit)})
433     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
434     logging.info("Overcloud image customization complete")
435     return patched_containers
436
437
438 def make_ssh_key():
439     """
440     Creates a public/private ssh key pair using 1024 bit RSA
441     :return: private, public key
442     """
443     key = rsa.generate_private_key(
444         backend=crypto_default_backend(),
445         public_exponent=65537,
446         key_size=1024
447     )
448
449     private_key = key.private_bytes(
450         crypto_serialization.Encoding.PEM,
451         crypto_serialization.PrivateFormat.PKCS8,
452         crypto_serialization.NoEncryption())
453     public_key = key.public_key().public_bytes(
454         crypto_serialization.Encoding.OpenSSH,
455         crypto_serialization.PublicFormat.OpenSSH
456     )
457     return private_key.decode('utf-8'), public_key.decode('utf-8')
458
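# Editor's note: a short sketch of how the pair is consumed in prep_env()
# below -- the private key is re-indented and spliced in under the
# 'replace_private_key' marker of the opnfv environment file, and the public
# key replaces the 'replace_public_key' marker:
#
#   private_key, public_key = make_ssh_key()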
459
460 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
461     """
462     Creates modified opnfv/network environments for deployment
463     :param ds: deploy settings
464     :param ns: network settings
465     :param inv: node inventory
466     :param opnfv_env: file path for opnfv-environment file
467     :param net_env: file path for network-environment file
468     :param tmp_dir: Apex tmp dir
469     :return:
470     """
471
472     logging.info("Preparing opnfv-environment and network-environment files")
473     ds_opts = ds['deploy_options']
474     tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
475     shutil.copyfile(opnfv_env, tmp_opnfv_env)
476     tenant_nic_map = ns['networks']['tenant']['nic_mapping']
477     tenant_nic = dict()
478     tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
479     tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
480     external_nic_map = ns['networks']['external'][0]['nic_mapping']
481     external_nic = dict()
482     external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
483
484     # SSH keys
485     private_key, public_key = make_ssh_key()
486
487     # Make easier/faster variables to index in the file editor
488     if 'performance' in ds_opts:
489         perf = True
490         # vpp
491         if 'vpp' in ds_opts['performance']['Compute']:
492             perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
493         else:
494             perf_vpp_comp = None
495         if 'vpp' in ds_opts['performance']['Controller']:
496             perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
497         else:
498             perf_vpp_ctrl = None
499
500         # ovs
501         if 'ovs' in ds_opts['performance']['Compute']:
502             perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
503         else:
504             perf_ovs_comp = None
505
506         # kernel
507         if 'kernel' in ds_opts['performance']['Compute']:
508             perf_kern_comp = ds_opts['performance']['Compute']['kernel']
509         else:
510             perf_kern_comp = None
511     else:
512         perf = False
513
514     tenant_settings = ns['networks']['tenant']
515     tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
516         ns['networks']['tenant'].get('segmentation_type') == 'vlan'
517
518     # Modify OPNFV environment
519     # TODO: build a dict and output YAML rather than editing line by line
520     for line in fileinput.input(tmp_opnfv_env, inplace=True):
521         line = line.strip('\n')
522         output_line = line
523         if 'CloudDomain' in line:
524             output_line = "  CloudDomain: {}".format(ns['domain_name'])
525         elif 'replace_private_key' in line:
526             output_line = "    private_key: |\n"
527             key_out = ''
528             for line in private_key.splitlines():
529                 key_out += "      {}\n".format(line)
530             output_line += key_out
531         elif 'replace_public_key' in line:
532             output_line = "    public_key: '{}'".format(public_key)
533         elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
534                 'resource_registry' in line:
535             output_line = "resource_registry:\n" \
536                           "  OS::TripleO::NodeUserData: first-boot.yaml"
537         elif 'ComputeExtraConfigPre' in line and \
538                 ds_opts['dataplane'] == 'ovs_dpdk':
539             output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
540                           './ovs-dpdk-preconfig.yaml'
541         elif 'NeutronNetworkVLANRanges' in line:
542             vlan_setting = ''
543             if tenant_vlan_enabled:
544                 if ns['networks']['tenant']['overlay_id_range']:
545                     vlan_setting = ns['networks']['tenant']['overlay_id_range']
546                     if 'datacentre' not in vlan_setting:
547                         vlan_setting += ',datacentre:1:1000'
548             # SRIOV networks are VLAN based provider networks. In order to
549             # simplify the deployment, nfv_sriov will be the default physnet.
550             # VLANs are not needed in advance, and the user will have to create
551             # the network specifying the segmentation-id.
552             if ds_opts['sriov']:
553                 if vlan_setting:
554                     vlan_setting += ",nfv_sriov"
555                 else:
556                     vlan_setting = "datacentre:1:1000,nfv_sriov"
557             if vlan_setting:
558                 output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
559         elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
560             if tenant_settings['overlay_id_range']:
561                 physnets = tenant_settings['overlay_id_range'].split(',')
562                 output_line = "  NeutronBridgeMappings: "
563                 for physnet in physnets:
564                     physnet_name = physnet.split(':')[0]
565                     if physnet_name != 'datacentre':
566                         output_line += "{}:br-vlan,".format(physnet_name)
567                 output_line += "datacentre:br-ex"
568         elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
569                 and ds_opts['sdn_controller'] == 'opendaylight':
570             if tenant_settings['overlay_id_range']:
571                 physnets = tenant_settings['overlay_id_range'].split(',')
572                 output_line = "  OpenDaylightProviderMappings: "
573                 for physnet in physnets:
574                     physnet_name = physnet.split(':')[0]
575                     if physnet_name != 'datacentre':
576                         output_line += "{}:br-vlan,".format(physnet_name)
577                 output_line += "datacentre:br-ex"
578         elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
579             output_line = "  NeutronNetworkType: vlan\n" \
580                           "  NeutronTunnelTypes: ''"
581
582         if ds_opts['sdn_controller'] == 'opendaylight' and \
583                 'odl_vpp_routing_node' in ds_opts:
584             if 'opendaylight::vpp_routing_node' in line:
585                 output_line = ("    opendaylight::vpp_routing_node: {}.{}"
586                                .format(ds_opts['odl_vpp_routing_node'],
587                                        ns['domain_name']))
588         elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
589             if 'NeutronVPPAgentPhysnets' in line:
590                 # VPP interface tap0 will be used for external network
591                 # connectivity.
592                 output_line = ("  NeutronVPPAgentPhysnets: "
593                                "'datacentre:{},external:tap0'"
594                                .format(tenant_nic['Controller']))
595         elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
596                 'dvr') is True:
597             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
598                 output_line = ''
599             elif 'NeutronDhcpAgentsPerNetwork' in line:
600                 num_control, num_compute = inv.get_node_counts()
601                 output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
602                                .format(num_compute))
603             elif 'ComputeServices' in line:
604                 output_line = ("  ComputeServices:\n"
605                                "    - OS::TripleO::Services::NeutronDhcpAgent")
606
607         if perf:
608             for role in 'NovaCompute', 'Controller':
609                 if role == 'NovaCompute':
610                     perf_opts = perf_vpp_comp
611                 else:
612                     perf_opts = perf_vpp_ctrl
613                 cfg = "{}ExtraConfig".format(role)
614                 if cfg in line and perf_opts:
615                     perf_line = ''
616                     if 'main-core' in perf_opts:
617                         perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
618                                       .format(perf_opts['main-core']))
619                     if 'corelist-workers' in perf_opts:
620                         perf_line += ("\n    "
621                                       "fdio::vpp_cpu_corelist_workers: '{}'"
622                                       .format(perf_opts['corelist-workers']))
623                     if ds_opts['sdn_controller'] == 'opendaylight' and \
624                             ds_opts['dataplane'] == 'fdio':
625                         if role == 'NovaCompute':
626                             perf_line += ("\n    "
627                                           "tripleo::profile::base::neutron::"
628                                           "agents::honeycomb::"
629                                           "interface_role_mapping:"
630                                           " ['{}:tenant-interface',"
631                                           "'{}:public-interface']"
632                                           .format(tenant_nic[role],
633                                                   external_nic[role]))
634                         else:
635                             perf_line += ("\n    "
636                                           "tripleo::profile::base::neutron::"
637                                           "agents::honeycomb::"
638                                           "interface_role_mapping:"
639                                           " ['{}:tenant-interface']"
640                                           .format(tenant_nic[role]))
641                     if perf_line:
642                         output_line = ("  {}:{}".format(cfg, perf_line))
643
644             if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
645                 for k, v in OVS_PERF_MAP.items():
646                     if k in line and v in perf_ovs_comp:
647                         output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])
648
649             # kernel args
650             # (FIXME) use compute's kernel settings for all nodes for now.
651             if perf_kern_comp:
652                 if 'NovaSchedulerDefaultFilters' in line:
653                     output_line = \
654                         "  NovaSchedulerDefaultFilters: 'RamFilter," \
655                         "ComputeFilter,AvailabilityZoneFilter," \
656                         "ComputeCapabilitiesFilter," \
657                         "ImagePropertiesFilter,NUMATopologyFilter'"
658                 elif 'ComputeKernelArgs' in line:
659                     kernel_args = ''
660                     for k, v in perf_kern_comp.items():
661                         kernel_args += "{}={} ".format(k, v)
662                     if kernel_args:
663                         output_line = "  ComputeKernelArgs: '{}'".\
664                             format(kernel_args)
665
666         print(output_line)
667
668     logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
669
670
671 def generate_ceph_key():
672     key = os.urandom(16)
673     header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
674     return base64.b64encode(header + key)
675
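# Editor's note on the header packed above (an interpretation, not from the
# original comments): '<hiih' is little-endian int16/int32/int32/int16, i.e.
# a key type of 1, the creation time in whole seconds, a zeroed nanoseconds
# field and the 16-byte secret length, so the base64 of header + key can be
# dropped into a Ceph keyring as-is.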
676
677 def prep_storage_env(ds, ns, virtual, tmp_dir):
678     """
679     Creates storage environment file for deployment.  Source file is copied by
680     undercloud playbook to host.
681     :param ds: deploy settings
682     :param ns: network settings
683     :param virtual: True if deployment is virtual
684     :param tmp_dir: Apex tmp dir
685     :return:
686     """
687     ds_opts = ds['deploy_options']
688     storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
689     if not os.path.isfile(storage_file):
690         logging.error("storage-environment file is not in tmp directory: {}. "
691                       "Check if file was copied from "
692                       "undercloud".format(tmp_dir))
693         raise ApexDeployException("storage-environment file not copied from "
694                                   "undercloud")
695     for line in fileinput.input(storage_file, inplace=True):
696         line = line.strip('\n')
697         if 'CephClusterFSID' in line:
698             print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
699         elif 'CephMonKey' in line:
700             print("  CephMonKey: {}".format(generate_ceph_key().decode(
701                 'utf-8')))
702         elif 'CephAdminKey' in line:
703             print("  CephAdminKey: {}".format(generate_ceph_key().decode(
704                 'utf-8')))
705         elif 'CephClientKey' in line:
706             print("  CephClientKey: {}".format(generate_ceph_key().decode(
707                 'utf-8')))
708         else:
709             print(line)
710
711     if ds_opts['containers']:
712         ceph_params = {}
713
714         # The maximum number of PGs allowed is calculated as num_mons * 200,
715         # so the PG count and pool size are set such that
716         # num_pgs * num_pools * num_osds stays below that limit.
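        # Editor's worked example (assuming one monitor and a single OSD, as
        # on a small virtual deploy): the ceiling is 1 * 200 = 200, and with
        # CephPoolDefaultPgNum of 32 about six pools fit underneath it
        # (32 * 6 * 1 = 192).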
717         ceph_params['CephPoolDefaultSize'] = 2
718         ceph_params['CephPoolDefaultPgNum'] = 32
719         if virtual:
720             ceph_params['CephAnsibleExtraConfig'] = {
721                 'centos_package_dependencies': [],
722                 'ceph_osd_docker_memory_limit': '1g',
723                 'ceph_mds_docker_memory_limit': '1g',
724             }
725         ceph_device = ds_opts['ceph_device']
726         ceph_params['CephAnsibleDisksConfig'] = {
727             'devices': [ceph_device],
728             'journal_size': 512,
729             'osd_scenario': 'collocated'
730         }
731         utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
732     # TODO(trozet): remove following block as we only support containers now
733     elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
734         with open(storage_file, 'a') as fh:
735             fh.write('  ExtraConfig:\n')
736             fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
737                 ds_opts['ceph_device']
738             ))
739
740
741 def prep_sriov_env(ds, tmp_dir):
742     """
743     Creates SRIOV environment file for deployment. Source file is copied by
744     undercloud playbook to host.
745     :param ds: deploy settings
746     :param tmp_dir: Apex tmp dir
747     :return:
748     """
749     ds_opts = ds['deploy_options']
750     sriov_iface = ds_opts['sriov']
751     sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
752     if not os.path.isfile(sriov_file):
753         logging.error("sriov-environment file is not in tmp directory: {}. "
754                       "Check if file was copied from "
755                       "undercloud".format(tmp_dir))
756         raise ApexDeployException("sriov-environment file not copied from "
757                                   "undercloud")
758     # TODO(rnoriega): Instead of line editing, refactor this code to load
759     # yaml file into a dict, edit it and write the file back.
760     for line in fileinput.input(sriov_file, inplace=True):
761         line = line.strip('\n')
762         if 'NovaSchedulerDefaultFilters' in line:
763             print("  {}".format(line[3:]))
764         elif 'NovaSchedulerAvailableFilters' in line:
765             print("  {}".format(line[3:]))
766         elif 'NeutronPhysicalDevMappings' in line:
767             print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
768                   .format(sriov_iface))
769         elif 'NeutronSriovNumVFs' in line:
770             print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
771         elif 'NovaPCIPassthrough' in line:
772             print("  NovaPCIPassthrough:")
773         elif 'devname' in line:
774             print("    - devname: \"{}\"".format(sriov_iface))
775         elif 'physical_network' in line:
776             print("      physical_network: \"nfv_sriov\"")
777         else:
778             print(line)
779
780
781 def external_network_cmds(ns, ds):
782     """
783     Generates external network openstack commands
784     :param ns: network settings
785     :param ds: deploy settings
786     :return: list of commands to configure external network
787     """
788     ds_opts = ds['deploy_options']
789     external_physnet = 'datacentre'
790     if ds_opts['dataplane'] == 'fdio' and \
791        ds_opts['sdn_controller'] != 'opendaylight':
792         external_physnet = 'external'
793     if 'external' in ns.enabled_network_list:
794         net_config = ns['networks']['external'][0]
795         external = True
796         pool_start, pool_end = net_config['floating_ip_range']
797     else:
798         net_config = ns['networks']['admin']
799         external = False
800         pool_start, pool_end = ns['apex']['networks']['admin'][
801             'introspection_range']
802     nic_config = net_config['nic_mapping']
803     gateway = net_config['gateway']
804     cmds = list()
805     # create network command
806     if nic_config['compute']['vlan'] == 'native':
807         ext_type = 'flat'
808     else:
809         ext_type = "vlan --provider-segment {}".format(nic_config[
810                                                        'compute']['vlan'])
811     cmds.append("openstack network create external --project service "
812                 "--external --provider-network-type {} "
813                 "--provider-physical-network {}"
814                 .format(ext_type, external_physnet))
815     # create subnet command
816     cidr = net_config['cidr']
817     subnet_cmd = "openstack subnet create external-subnet --project " \
818                  "service --network external --no-dhcp --gateway {} " \
819                  "--allocation-pool start={},end={} --subnet-range " \
820                  "{}".format(gateway, pool_start, pool_end, str(cidr))
821     if external and cidr.version == 6:
822         subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
823                       '--ipv6-address-mode slaac'
824     cmds.append(subnet_cmd)
825     logging.debug("Neutron external network commands determined "
826                   "as: {}".format(cmds))
827     return cmds
828
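# Editor's sketch of the generated commands for a flat external network
# (addresses are placeholders, not values from any real settings file):
#
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service \
#     --network external --no-dhcp --gateway 172.30.9.1 \
#     --allocation-pool start=172.30.9.100,end=172.30.9.200 \
#     --subnet-range 172.30.9.0/24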
829
830 def create_congress_cmds(overcloud_file):
831     drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
832     overcloudrc = parsers.parse_overcloudrc(overcloud_file)
833     logging.info("Creating congress commands")
834     try:
835         ds_cfg = [
836             "username={}".format(overcloudrc['OS_USERNAME']),
837             "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
838             "password={}".format(overcloudrc['OS_PASSWORD']),
839             "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
840         ]
841     except KeyError:
842         logging.error("Unable to find all keys required for congress in "
843                       "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
844                       "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
845                       "file: {}".format(overcloud_file))
846         raise
847     cmds = list()
848     ds_cfg = '--config ' + ' --config '.join(ds_cfg)
849
850     for driver in drivers:
851         if driver == 'doctor':
852             cmd = "{} \"{}\"".format(driver, driver)
853         else:
854             cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
855         if driver == 'nova':
856             cmd += ' --config api_version="2.34"'
857         logging.debug("Congress command created: {}".format(cmd))
858         cmds.append(cmd)
859     return cmds
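# Editor's sketch of one datasource command produced above, using placeholder
# credentials (the real values come from the parsed overcloudrc file):
#
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=secret --config auth_url=http://<vip>:5000/v3 \
#     --config api_version="2.34"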