Enable BGPVPN for master deployments
[apex.git] / apex / overcloud / deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import shutil
import struct
import time
import uuid

import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""


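# Note (illustrative): prep_image() below uploads this unit into the
# overcloud image and enables it, so that /srv/data.img is attached to
# /dev/loop3 early at boot, giving containerized Ceph a persistent loop
# device to use for its OSDs.  Roughly equivalent to running the following
# by hand on the deployed node:
#
#   truncate --size 10G /srv/data.img
#   losetup /dev/loop3 /srv/data.img
#   losetup -a   # verify the mapping
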
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the SDN controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used.  By
    default, a feature is added to the list when it is set to true in the
    deploy settings.  If a feature's value is not a boolean, it is treated
    as a (k, v) tuple and compared against the deploy settings.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if not env_list:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list

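# A minimal sketch of how the map resolves (illustrative; real deploy
# settings objects carry many more keys):
#
#   ds = {'sdn_controller': 'opendaylight', 'sfc': True}
#   build_sdn_env_list(ds, SDN_FILE_MAP)
#   # -> [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#   #     <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]
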

def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list

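# Illustrative substitution: if VALID_DOCKER_SERVICES maps
# 'neutron-opendaylight.yaml' to None, the entry from build_sdn_env_list()
# is simply re-rooted under THT_DOCKER_ENV_DIR[os_version] with the same
# basename; a non-None mapping value substitutes a renamed containerized
# variant of the file instead.
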

def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
        if sdn_docker_files:
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                # THT_DOCKER_ENV_DIR is keyed by OS version (see
                # get_docker_sdn_files above)
                deploy_options.append(os.path.join(
                    con.THT_DOCKER_ENV_DIR[ds_opts['os_version']],
                    "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for the 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS.  Either way it will probably improve over time as the
        # aarch64 support matures in CentOS, and deploy time should be
        # re-tested in the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd

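# For reference, a generated command looks roughly like this (sketch; the
# exact -e list depends on deploy settings):
#
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <THT_ENV_DIR>/docker.yaml \
#     -e opnfv-environment.yaml -e docker-images.yaml ... \
#     --ntp-server pool.ntp.org --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute --libvirt-type kvm
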

def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of container names that were patched
    """
    # TODO(trozet): Come up with a better way to organize the logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Disabling neutron openvswitch-agent in image")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD startup configured in overcloud image")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y install "
                                   "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                                   "{}".format(OVS_NSH_KMOD_RPM)},
                {con.VIRT_RUN_CMD: "yum downgrade -y "
                                   "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                                   "{}".format(OVS_NSH_RPM)}
            ])
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    # undercloud admin IP is needed both for ODL injection and for applying
    # upstream patches below
    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


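# Shape of the virt_cmds list consumed by virt_utils.virt_customize()
# above (constant names are the project's own; values illustrative):
#
#   virt_cmds = [
#       {con.VIRT_RUN_CMD: "rm -f /some/file"},        # run command in image
#       {con.VIRT_UPLOAD: "/local/path:/image/dir/"},  # upload local file
#       {con.VIRT_PW: "password:secret"},              # set root password
#   ]
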
def make_ssh_key():
    """
    Creates a public/private SSH key pair using 1024-bit RSA
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


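# Example usage (illustrative):
#
#   priv, pub = make_ssh_key()
#   # priv is a PEM/PKCS8 block starting '-----BEGIN PRIVATE KEY-----'
#   # pub is a single 'ssh-rsa AAAA...' line suitable for authorized_keys
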
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        perf_vpp_comp = ds_opts['performance']['Compute'].get('vpp')
        perf_vpp_ctrl = ds_opts['performance']['Controller'].get('vpp')
        perf_ovs_comp = ds_opts['performance']['Compute'].get('ovs')
        perf_kern_comp = ds_opts['performance']['Compute'].get('kernel')
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            # use a distinct loop variable so the outer 'line' is not
            # clobbered for the checks further below
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to
            # create the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in ('NovaCompute', 'Controller'):
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


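# Note on the in-place editing pattern used above (stdlib behavior):
# fileinput.input(path, inplace=True) redirects stdout into the file being
# read, so each print() writes the (possibly rewritten) line back out:
#
#   for line in fileinput.input(path, inplace=True):
#       line = line.rstrip('\n')
#       print(transform(line))  # transform() is a placeholder
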
def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


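# The packed header mirrors Ceph's binary secret layout (informal sketch):
# '<hiih' packs a little-endian int16 key type (1 = AES), int32 creation
# seconds, int32 creation nanoseconds (0 here) and an int16 payload length,
# followed by the 16 random key bytes; base64 of the whole blob is what
# appears in a ceph keyring.
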
def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if deployment is virtual
    :param tmp_dir: Apex tmp dir
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # Ceph bounds the allowed placement groups (max PGs are roughly
        # num_mons * 200), so choose pool size and default PG count such
        # that num_pgs * num_pools * num_osds stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


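# Illustrative result of the edit_tht_env() call above; the storage
# environment gains parameter_defaults along these lines (values vary
# with deploy settings):
#
#   parameter_defaults:
#     CephPoolDefaultSize: 2
#     CephPoolDefaultPgNum: 32
#     CephAnsibleDisksConfig:
#       devices: ['/dev/loop3']
#       journal_size: 512
#       osd_scenario: collocated
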
def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            # drop the leading '  #' to uncomment the template line
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


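# Typical generated commands (illustrative addresses):
#
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service \
#     --network external --no-dhcp --gateway 192.0.2.1 \
#     --allocation-pool start=192.0.2.10,end=192.0.2.50 \
#     --subnet-range 192.0.2.0/24
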
def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
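
# Each returned string is the argument tail for a congress datasource
# create call, e.g. (illustrative values):
#
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=secret --config auth_url=http://192.0.2.5:5000/v3 \
#     --config api_version="2.34"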