# apex/overcloud/deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]
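# The services above are removed again when ComputeServices are merged into
# ControllerServices for all-in-one deployments (see prep_env below).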


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the SDN controller
    is matched, and then the function looks for enabled features for that
    controller to determine which environment files should be used.  By
    default a feature is added to the list if it is set to true in the deploy
    settings.  If a feature in the map carries a (value, file) tuple instead
    of a plain file name, the file is added only when the deploy setting for
    that feature matches the tuple's value.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
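    # For example (hypothetical deploy settings), ds = {'sdn_controller':
    # 'opendaylight', 'sfc': True} yields:
    #   [<THT_ENV_DIR>/neutron-opendaylight.yaml,
    #    <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]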
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
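    # For example, neutron-opendaylight.yaml resolves to its equivalent under
    # the docker services THT directory when it appears in
    # con.VALID_DOCKER_SERVICES (illustrative; the real mapping comes from
    # that constant).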
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):
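    """
    Builds the overcloud deploy command from the deploy/network settings and
    node inventory, writes it to <tmp_dir>/deploy_command and returns it.

    The generated command (with hypothetical values) looks roughly like:
      openstack overcloud deploy --templates --timeout 90
        -e network-environment.yaml -e opnfv-environment.yaml ...
        --ntp-server pool.ntp.org --control-scale 1 --compute-scale 1
        --control-flavor control --compute-flavor compute --libvirt-type kvm

    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param tmp_dir: Apex tmp dir
    :param virtual: True if virtual deployment
    :param env_file: OPNFV environment file to include
    :param net_data: whether to pass network_data.yaml to the deploy command
    :return: deploy command string
    """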

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy times should be re-tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of container names that were patched
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD startup script injected into image")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic Linux namespace interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if deploying containers with Ceph and no dedicated ceph device, we
    # need to use a persistent loop device for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a public/private ssh key pair using 1024-bit RSA
    :return: private key string, public key string
    """
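    # The key pair is injected into the opnfv environment file by prep_env()
    # via the replace_private_key/replace_public_key placeholders.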
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change this to build a dict and output YAML rather than parsing
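    # Note: with fileinput(inplace=True) stdout is redirected into the file,
    # so each print(output_line) below rewrites the corresponding line.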
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if this is a single
    # node (all-in-one) deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
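    """
    Generates a random Ceph secret key: 16 random bytes prefixed with a
    struct-packed header of (1, current time, 0, key length), base64 encoded.
    :return: base64 encoded key (bytes)
    """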
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if virtual deployment
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum number of PGs allowed is num_mons * 200, so set the
        # pool size and default PG count such that
        # num_pgs * num_pools * num_osds stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
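    # For illustration (hypothetical flat external network), the generated
    # commands look roughly like:
    #   openstack network create external --project service --external
    #     --provider-network-type flat --provider-physical-network datacentre
    #   openstack subnet create external-subnet --project service
    #     --network external --no-dhcp --gateway 172.30.9.1
    #     --allocation-pool start=172.30.9.2,end=172.30.9.100
    #     --subnet-range 172.30.9.0/24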
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
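    """
    Generates Congress datasource driver configuration commands from
    overcloudrc credentials.
    :param overcloud_file: path to overcloudrc file
    :return: list of Congress datasource commands
    """
    # Each generated entry looks roughly like (hypothetical credentials):
    #   nova "nova" --config username=admin --config tenant_name=admin
    #     --config password=secret --config auth_url=http://10.0.0.1:5000/v3
    #     --config api_version="2.34"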
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds