apex/overcloud/deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


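# Maps an SDN controller (or False for no SDN) to the tripleo-heat-templates
# environment files enabled for it. Nested dicts are keyed by feature flags
# from deploy settings; a tuple value means the deploy setting for that key
# must equal the first element for the env file (second element) to be used.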
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

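# Optional features toggled by a boolean deploy setting and the THT
# environment file that enables each of them.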
OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

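# Maps OVS-DPDK heat parameter names to the corresponding keys under the
# 'ovs' performance options in deploy settings.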
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

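# systemd unit installed into the overcloud image so that /dev/loop3 is
# backed by /srv/data.img at boot; used as the Ceph OSD device when no
# dedicated ceph_device is available (see prep_image below).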
LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

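# Compute-side services that duplicate controller functionality; these are
# stripped from the merged service list for all-in-one deployments in
# prep_env().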
DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  The SDN controller is
    matched first, and then the function looks for enabled features of that
    controller to determine which environment files should be used.  By
    default a feature is added to the list when it is set to true in the
    deploy settings.  If a feature does not have a boolean value, then the
    map entry is a (value, env file) tuple and the env file is added only
    when the deploy setting for that key equals the given value.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
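    # VALID_DOCKER_SERVICES maps the basename of each non-containerized env
    # file to its containerized equivalent (or None when the filename is the
    # same in the docker THT environments directory)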
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):
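    """
    Assembles the 'openstack overcloud deploy' command and writes it to
    tmp_dir/deploy_command.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: Apex tmp dir
    :param virtual: True if deployment is virtual
    :param env_file: opnfv environment file to include
    :param net_data: True if a network_data.yaml file should be passed
    :return: deploy command string
    """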

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
        if sdn_docker_files:
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS
        # Either way it will probably get better over time as the aarch
        # support matures in CentOS and deploy time should be tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start on boot")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
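        # scripts placed under /etc/sysconfig/modules/ are executed at boot
        # on CentOS, loading the vfio-pci and uio_pci_generic drivers that
        # DPDK interfaces can bind to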
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic Linux namespace interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
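    # generated in-memory and substituted below for the replace_private_key
    # and replace_public_key placeholders in the opnfv environment file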
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
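        # With ODL DVR, the default NeutronDhcpAgent entry is dropped and the
        # DHCP agent is scheduled on compute nodes instead (or on controllers
        # when there are no computes)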
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if only a single
    # node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
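    """
    Generates a random Ceph key in the binary layout used by Ceph keyrings:
    a little-endian header (type/version, creation time, zero, key length)
    followed by 16 random bytes, base64-encoded.
    :return: base64-encoded key as bytes
    """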
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # max pgs allowed are calculated as num_mons * 200, so set the
        # number of pgs and pools such that the total
        # (num_pgs * num_pools * num_osds) stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
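        # the scheduler filter parameters ship commented out in the source
        # template; stripping the first three characters (assumed to be the
        # comment prefix) re-enables them with the expected indentation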
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

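    # build one datasource definition per driver; the doctor driver takes no
    # config options and the nova driver pins its API version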
    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds