Recover fix to install custom OVS
apex/overcloud/deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}
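# OVS_PERF_MAP maps TripleO heat template parameter names to the matching
# keys of the 'ovs' performance options in the deploy settings; prep_env()
# below uses it to rewrite those parameters.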

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]
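# DUPLICATE_COMPUTE_SERVICES lists compute-side services that already have
# controller equivalents; prep_env() drops them when ComputeServices are
# merged into ControllerServices for all-in-one (no compute node) deployments.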


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map. First the SDN controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used. By
    default a feature is added to the list if it is set to true in the
    deploy settings. If a feature does not have a boolean value, then the
    key and value pair to compare against are given as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
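
    Example (illustrative): with hypothetical deploy options
    {'sdn_controller': 'opendaylight', 'sfc': True}, the returned list
    would be [<THT_ENV_DIR>/neutron-opendaylight.yaml,
    <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml].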
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
        if sdn_docker_files:
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))
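    # Illustrative result (hypothetical values), written as a single line to
    # the 'deploy_command' file below:
    #   openstack overcloud deploy --templates --timeout 90
    #     -e network-environment.yaml -e opnfv-environment.yaml ...
    #     --ntp-server pool.ntp.org --control-scale 1 --compute-scale 1
    #     --control-flavor control --compute-flavor compute
    #     --libvirt-type kvm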

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
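        # Each generated file is a two-line modprobe script; for example
        # vfio_pci.modules will contain:
        #   #!/bin/bash
        #   exec /sbin/modprobe vfio_pci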
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
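            # Work around it by fetching the custom OVS RPM named by
            # con.CUSTOM_OVS into tmp_dir, uploading it into the overcloud
            # image and downgrading openvswitch to that build.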
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')

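# Illustrative usage (sketch): prep_env() below consumes this pair to fill
# the 'replace_private_key' / 'replace_public_key' placeholders in the
# opnfv-environment file:
#   private_key, public_key = make_ssh_key()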

def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change this to build a dict and output yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if this is a
    # single-node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
    key = os.urandom(16)
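    # cephx keys are base64(header + secret); '<hiih' below packs a
    # little-endian header of key type (1), creation time in seconds,
    # nanoseconds (0) and the secret length (16) ahead of the random bytes.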
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum number of PGs allowed is calculated as num_mons * 200,
        # so set the pool size and default PG count such that
        # num_pgs * num_pools * num_osds stays below that limit.
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds

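# Illustrative output (hypothetical addresses): external_network_cmds()
# typically returns commands along the lines of
#   openstack network create external --project service --external
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service --network
#     external --no-dhcp --gateway 192.0.2.1 --allocation-pool
#     start=192.0.2.10,end=192.0.2.100 --subnet-range 192.0.2.0/24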

def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
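
# Illustrative output (hypothetical credentials): each driver entry renders
# as a command fragment along the lines of
#   nova "nova" --config username=admin --config tenant_name=admin
#     --config password=secret --config auth_url=http://192.0.2.10:5000/v3
#     --config api_version="2.34"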