Modify common patches schema to be per branch
[apex.git] / apex / overcloud / deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import platform
15 import pprint
16 import shutil
17 import uuid
18 import struct
19 import time
20 import yaml
21 import apex.builders.overcloud_builder as oc_builder
22 import apex.builders.common_builder as c_builder
23
24 from apex.common import constants as con
25 from apex.common.exceptions import ApexDeployException
26 from apex.common import parsers
27 from apex.common import utils
28 from apex.virtual import utils as virt_utils
29 from cryptography.hazmat.primitives import serialization as \
30     crypto_serialization
31 from cryptography.hazmat.primitives.asymmetric import rsa
32 from cryptography.hazmat.backends import default_backend as \
33     crypto_default_backend
34
35
36 SDN_FILE_MAP = {
37     'opendaylight': {
38         'sfc': 'neutron-sfc-opendaylight.yaml',
39         'vpn': 'neutron-bgpvpn-opendaylight.yaml',
40         'gluon': 'gluon.yaml',
41         'vpp': {
42             'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
43             'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
44             'default': 'neutron-opendaylight-honeycomb.yaml'
45         },
46         'l2gw': 'neutron-l2gw-opendaylight.yaml',
47         'sriov': 'neutron-opendaylight-sriov.yaml',
48         'default': 'neutron-opendaylight.yaml',
49     },
50     'onos': {
51         'sfc': 'neutron-onos-sfc.yaml',
52         'default': 'neutron-onos.yaml'
53     },
54     'ovn': 'neutron-ml2-ovn.yaml',
55     False: {
56         'vpp': 'neutron-ml2-vpp.yaml',
57         'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
58     }
59 }
60
61 OTHER_FILE_MAP = {
62     'tacker': 'enable_tacker.yaml',
63     'congress': 'enable_congress.yaml',
64     'barometer': 'enable_barometer.yaml',
65     'rt_kvm': 'enable_rt_kvm.yaml'
66 }
67
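# Maps TripleO heat parameter names to the corresponding keys under the
# 'performance' -> 'Compute' -> 'ovs' section of the deploy settings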
68 OVS_PERF_MAP = {
69     'HostCpusList': 'dpdk_cores',
70     'NeutronDpdkCoreList': 'pmd_cores',
71     'NeutronDpdkSocketMemory': 'socket_memory',
72     'NeutronDpdkMemoryChannels': 'memory_channels'
73 }
74
75 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
76 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
77 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
78                       ".noarch.rpm"
79
80 LOOP_DEVICE_SIZE = "10G"
81
82 LOSETUP_SERVICE = """[Unit]
83 Description=Setup loop devices
84 Before=network.target
85
86 [Service]
87 Type=oneshot
88 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
89 ExecStop=/sbin/losetup -d /dev/loop3
90 TimeoutSec=60
91 RemainAfterExit=yes
92
93 [Install]
94 WantedBy=multi-user.target
95 """
96
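# Compute services the controller role already provides; they are stripped
# back out when compute services are merged into ControllerServices for
# all-in-one deployments (see prep_env below)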
97 DUPLICATE_COMPUTE_SERVICES = [
98     'OS::TripleO::Services::ComputeNeutronCorePlugin',
99     'OS::TripleO::Services::ComputeNeutronMetadataAgent',
100     'OS::TripleO::Services::ComputeNeutronOvsAgent',
101     'OS::TripleO::Services::ComputeNeutronL3Agent'
102 ]
103
104
105 def build_sdn_env_list(ds, sdn_map, env_list=None):
106     """
107     Builds a list of SDN environment files to be used in the deploy cmd.
108
109     This function recursively searches an sdn_map.  First the SDN
110     controller is matched, then the function looks for enabled features
111     for that controller to determine which environment files should be
112     used.  By default a feature is added to the list if it is set to true
113     in the deploy settings.  If a feature is not boolean, the map entry is
114     an (expected value, env file) tuple matched against the setting.
115
116     :param ds: deploy settings
117     :param sdn_map: SDN map to recursively search
118     :param env_list: recursive var to hold previously found env_list
119     :return: A list of env files
120     """
121     if env_list is None:
122         env_list = list()
123     for k, v in sdn_map.items():
124         if ds['sdn_controller'] == k or (k in ds and ds[k]):
125             if isinstance(v, dict):
126                 # Append default SDN env file first
127                 # The assumption is that feature-enabled SDN env files
128                 # override and do not conflict with previously set default
129                 # settings
130                 if ds['sdn_controller'] == k and 'default' in v:
131                     env_list.append(os.path.join(con.THT_ENV_DIR,
132                                                  v['default']))
133                 env_list.extend(build_sdn_env_list(ds, v))
134             # a tuple means (expected setting value, env file to add)
135             elif isinstance(v, tuple):
136                 if ds[k] == v[0]:
137                     env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
138             else:
139                 env_list.append(os.path.join(con.THT_ENV_DIR, v))
140     if len(env_list) == 0:
141         try:
142             env_list.append(os.path.join(
143                 con.THT_ENV_DIR, sdn_map['default']))
144         except KeyError:
145             logging.warning("Unable to find default file for SDN")
146
147     return env_list
148
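# Illustrative example (deploy options assumed, not executed here): calling
#   build_sdn_env_list({'sdn_controller': 'opendaylight', 'sfc': True},
#                      SDN_FILE_MAP)
# returns the ODL default env file followed by the SFC overlay, i.e.
# [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#  <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]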
149
150 def get_docker_sdn_files(ds_opts):
151     """
152     Returns docker THT env files for the detected SDN
153     :param ds_opts: deploy options
154     :return: list of docker THT env files for an SDN
155     """
156     docker_services = con.VALID_DOCKER_SERVICES
157     tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
158     sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
159     for i, sdn_file in enumerate(sdn_env_list):
160         sdn_base = os.path.basename(sdn_file)
161         if sdn_base in docker_services:
162             if docker_services[sdn_base] is not None:
163                 sdn_env_list[i] = \
164                     os.path.join(tht_dir, docker_services[sdn_base])
165             else:
166                 sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
167     return sdn_env_list
168
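# Roughly: each puppet env file returned by build_sdn_env_list() (e.g.
# neutron-opendaylight.yaml) is swapped for its containerized counterpart
# under THT_DOCKER_ENV_DIR[os_version] whenever its basename appears in
# VALID_DOCKER_SERVICES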
169
170 def create_deploy_cmd(ds, ns, inv, tmp_dir,
171                       virtual, env_file='opnfv-environment.yaml',
172                       net_data=False):
173
174     logging.info("Creating deployment command")
175     deploy_options = ['network-environment.yaml']
176
177     ds_opts = ds['deploy_options']
178
179     if ds_opts['containers']:
180         deploy_options.append(os.path.join(con.THT_ENV_DIR,
181                                            'docker.yaml'))
182
183     if ds['global_params']['ha_enabled']:
184         if ds_opts['containers']:
185             deploy_options.append(os.path.join(con.THT_ENV_DIR,
186                                                'docker-ha.yaml'))
187         else:
188             deploy_options.append(os.path.join(con.THT_ENV_DIR,
189                                                'puppet-pacemaker.yaml'))
190
191     if env_file:
192         deploy_options.append(env_file)
193
194     if ds_opts['containers']:
195         deploy_options.append('docker-images.yaml')
196         sdn_docker_files = get_docker_sdn_files(ds_opts)
197         for sdn_docker_file in sdn_docker_files:
198             deploy_options.append(sdn_docker_file)
199         if sdn_docker_files:
200             deploy_options.append('sdn-images.yaml')
201     else:
202         deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
203
204     for k, v in OTHER_FILE_MAP.items():
205         if k in ds_opts and ds_opts[k]:
206             if ds_opts['containers']:
207                 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
208                                                    "{}.yaml".format(k)))
209             else:
210                 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
211
212     if ds_opts['ceph'] and 'csit' not in env_file:
213         prep_storage_env(ds, ns, virtual, tmp_dir)
214         deploy_options.append(os.path.join(con.THT_ENV_DIR,
215                                            'storage-environment.yaml'))
216     if ds_opts['sriov']:
217         prep_sriov_env(ds, tmp_dir)
218
219     # Check for 'k8s' here intentionally, as we may support other values
220     # such as openstack/openshift for 'vim' option.
221     if ds_opts['vim'] == 'k8s':
222         deploy_options.append('kubernetes-environment.yaml')
223
224     if virtual:
225         deploy_options.append('virtual-environment.yaml')
226     else:
227         deploy_options.append('baremetal-environment.yaml')
228
229     num_control, num_compute = inv.get_node_counts()
230     if num_control > 1 and not ds['global_params']['ha_enabled']:
231         num_control = 1
232     if platform.machine() == 'aarch64':
233         # aarch64 deploys were not completing in the default 90 mins.
234         # Not sure if this is related to the hardware the OOO support
235         # was developed on or to the virtualization support in CentOS.
236         # Either way it will probably improve over time as the aarch64
237         # support matures in CentOS, and deploy time should be re-tested
238         # in the future so this multiplier can be removed.
239         con.DEPLOY_TIMEOUT *= 2
240     cmd = "openstack overcloud deploy --templates --timeout {} " \
241           .format(con.DEPLOY_TIMEOUT)
242     # build cmd env args
243     for option in deploy_options:
244         cmd += " -e {}".format(option)
245     cmd += " --ntp-server {}".format(ns['ntp'][0])
246     cmd += " --control-scale {}".format(num_control)
247     cmd += " --compute-scale {}".format(num_compute)
248     cmd += ' --control-flavor control --compute-flavor compute'
249     if net_data:
250         cmd += ' --networks-file network_data.yaml'
251     libvirt_type = 'kvm'
252     if virtual:
253         with open('/sys/module/kvm_intel/parameters/nested') as f:
254             nested_kvm = f.read().strip()
255             if nested_kvm != 'Y':
256                 libvirt_type = 'qemu'
257     cmd += ' --libvirt-type {}'.format(libvirt_type)
258     logging.info("Deploy command set: {}".format(cmd))
259
260     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
261         fh.write(cmd)
262     return cmd
263
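# A generated deploy command looks roughly like the following (env files and
# values depend on the deploy/network settings; shown only as a sketch):
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e <THT_ENV_DIR>/docker.yaml ... \
#     --ntp-server pool.ntp.org --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute --libvirt-type qemu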
264
265 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
266                patches=None):
267     """
268     Locates sdn image and preps for deployment.
269     :param ds: deploy settings
270     :param ns: network settings
271     :param img: sdn image
272     :param tmp_dir: dir to store modified sdn image
273     :param root_pw: password to configure for overcloud image
274     :param docker_tag: Docker image tag for RDO version (default None)
275     :param patches: List of patches to apply to overcloud image
276     :return: None
277     """
278     # TODO(trozet): Come up with a better way to organize this logic in this
279     # function
280     logging.info("Preparing image: {} for deployment".format(img))
281     if not os.path.isfile(img):
282         logging.error("Missing SDN image {}".format(img))
283         raise ApexDeployException("Missing SDN image file: {}".format(img))
284
285     ds_opts = ds['deploy_options']
286     virt_cmds = list()
287     sdn = ds_opts['sdn_controller']
288     patched_containers = set()
289     # we need this due to rhbz #1436021
290     # fixed in systemd-219-37.el7
291     if sdn is not False:
292         logging.info("Neutron openvswitch-agent disabled")
293         virt_cmds.extend([{
294             con.VIRT_RUN_CMD:
295                 "rm -f /etc/systemd/system/multi-user.target.wants/"
296                 "neutron-openvswitch-agent.service"},
297             {
298             con.VIRT_RUN_CMD:
299                 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
300                 ".service"
301         }])
302
303     if ns.get('http_proxy', ''):
304         virt_cmds.append({
305             con.VIRT_RUN_CMD:
306                 "echo 'http_proxy={}' >> /etc/environment".format(
307                     ns['http_proxy'])})
308
309     if ns.get('https_proxy', ''):
310         virt_cmds.append({
311             con.VIRT_RUN_CMD:
312                 "echo 'https_proxy={}' >> /etc/environment".format(
313                     ns['https_proxy'])})
314
315     tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
316     shutil.copyfile(img, tmp_oc_image)
317     logging.debug("Temporary overcloud image stored as: {}".format(
318         tmp_oc_image))
319
320     if ds_opts['vpn']:
321         oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
322         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
323         virt_cmds.append({
324             con.VIRT_RUN_CMD:
325                 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
326                 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
327         virt_cmds.append({
328             con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
329                               "zrpcd_start.sh"})
330         virt_cmds.append({
331             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
332                               "init.d/zrpcd_start.sh' /etc/rc.local "})
333         virt_cmds.append({
334             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
335                               "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
336         logging.info("ZRPCD startup configured in overcloud image")
337
338     dataplane = ds_opts['dataplane']
339     if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
340         logging.info("Enabling kernel modules for dpdk")
341         # file to module mapping
342         uio_types = {
343             os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
344             os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
345         }
346         for mod_file, mod in uio_types.items():
347             with open(mod_file, 'w') as fh:
348                 fh.write('#!/bin/bash\n')
349                 fh.write('exec /sbin/modprobe {}'.format(mod))
350                 fh.close()
351
352             virt_cmds.extend([
353                 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
354                     mod_file)},
355                 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
356                                    "{}".format(os.path.basename(mod_file))}
357             ])
358     if root_pw:
359         pw_op = "password:{}".format(root_pw)
360         virt_cmds.append({con.VIRT_PW: pw_op})
361
362     if dataplane == 'ovs':
363         if ds_opts['sfc']:
364             virt_cmds.extend([
365                 {con.VIRT_RUN_CMD: "yum -y install "
366                                    "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
367                                    "{}".format(OVS_NSH_KMOD_RPM)},
368                 {con.VIRT_RUN_CMD: "yum downgrade -y "
369                                    "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
370                                    "{}".format(OVS_NSH_RPM)}
371             ])
372         elif sdn == 'opendaylight':
373             # FIXME(trozet) remove this after RDO is updated with fix for
374             # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
375             ovs_file = os.path.basename(con.CUSTOM_OVS)
376             ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
377             utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
378                                             targets=[ovs_file])
379             virt_cmds.extend([
380                 {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
381                                                                   ovs_file))},
382                 {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
383                     ovs_file)}
384             ])
385     if dataplane == 'fdio':
386         # Patch neutron to use the OVS external interface for the router
387         # and add a generic Linux namespace interface driver
388         virt_cmds.append(
389             {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
390                                "-p1 < neutron-patch-NSDriver.patch"})
391         if sdn is False:
392             virt_cmds.extend([
393                 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
394                 {con.VIRT_RUN_CMD: "yum install -y "
395                                    "/root/nosdn_vpp_rpms/*.rpm"}
396             ])
397
398     undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
399         'installer_vm']['ip']
400     if sdn == 'opendaylight':
401         oc_builder.inject_opendaylight(
402             odl_version=ds_opts['odl_version'],
403             image=tmp_oc_image,
404             tmp_dir=tmp_dir,
405             uc_ip=undercloud_admin_ip,
406             os_version=ds_opts['os_version'],
407             docker_tag=docker_tag,
408         )
409         if docker_tag:
410             patched_containers = patched_containers.union({'opendaylight'})
411
412     if patches:
413         if ds_opts['os_version'] == 'master':
414             branch = ds_opts['os_version']
415         else:
416             branch = "stable/{}".format(ds_opts['os_version'])
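        # e.g. an os_version of 'queens' applies patches against
        # 'stable/queens', while 'master' applies them against 'master'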
417         logging.info('Adding patches to overcloud')
418         patched_containers = patched_containers.union(
419             c_builder.add_upstream_patches(patches,
420                                            tmp_oc_image, tmp_dir,
421                                            branch,
422                                            uc_ip=undercloud_admin_ip,
423                                            docker_tag=docker_tag))
424     # if deploying containers with ceph and no dedicated ceph device, we
425     # need to use a persistent loop device for the Ceph OSDs
426     if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
427         tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
428         with open(tmp_losetup, 'w') as fh:
429             fh.write(LOSETUP_SERVICE)
430         virt_cmds.extend([
431             {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
432              },
433             {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
434                 .format(LOOP_DEVICE_SIZE)},
435             {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
436             {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
437         ])
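        # the systemd unit uploaded above recreates /dev/loop3 from
        # /srv/data.img on each boot, giving ceph-ansible a persistent
        # device (sized by LOOP_DEVICE_SIZE) to use for the OSD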
438     # TODO(trozet) remove this after LP#173474 is fixed
439     dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
440     virt_cmds.append(
441         {con.VIRT_RUN_CMD: "crudini --del {} Unit "
442                            "ConditionPathExists".format(dhcp_unit)})
443     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
444     logging.info("Overcloud image customization complete")
445     return patched_containers
446
447
448 def make_ssh_key():
449     """
450     Creates a public and private ssh key pair using 1024 bit RSA
451     :return: private, public key
452     """
453     key = rsa.generate_private_key(
454         backend=crypto_default_backend(),
455         public_exponent=65537,
456         key_size=1024
457     )
458
459     private_key = key.private_bytes(
460         crypto_serialization.Encoding.PEM,
461         crypto_serialization.PrivateFormat.PKCS8,
462         crypto_serialization.NoEncryption())
463     public_key = key.public_key().public_bytes(
464         crypto_serialization.Encoding.OpenSSH,
465         crypto_serialization.PublicFormat.OpenSSH
466     )
467     return private_key.decode('utf-8'), public_key.decode('utf-8')
468
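# The returned key pair is what prep_env() below substitutes for the
# 'replace_private_key' and 'replace_public_key' placeholders in the
# opnfv environment file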
469
470 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
471     """
472     Creates modified opnfv/network environments for deployment
473     :param ds: deploy settings
474     :param ns: network settings
475     :param inv: node inventory
476     :param opnfv_env: file path for opnfv-environment file
477     :param net_env: file path for network-environment file
478     :param tmp_dir: Apex tmp dir
479     :return:
480     """
481
482     logging.info("Preparing opnfv-environment and network-environment files")
483     ds_opts = ds['deploy_options']
484     tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
485     shutil.copyfile(opnfv_env, tmp_opnfv_env)
486     tenant_nic_map = ns['networks']['tenant']['nic_mapping']
487     tenant_nic = dict()
488     tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
489     tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
490     external_nic_map = ns['networks']['external'][0]['nic_mapping']
491     external_nic = dict()
492     external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
493
494     # SSH keys
495     private_key, public_key = make_ssh_key()
496
497     num_control, num_compute = inv.get_node_counts()
498     if num_control > 1 and not ds['global_params']['ha_enabled']:
499         num_control = 1
500
501     # Set up convenience variables for the file editor below
502     if 'performance' in ds_opts:
503         perf = True
504         # vpp
505         if 'vpp' in ds_opts['performance']['Compute']:
506             perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
507         else:
508             perf_vpp_comp = None
509         if 'vpp' in ds_opts['performance']['Controller']:
510             perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
511         else:
512             perf_vpp_ctrl = None
513
514         # ovs
515         if 'ovs' in ds_opts['performance']['Compute']:
516             perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
517         else:
518             perf_ovs_comp = None
519
520         # kernel
521         if 'kernel' in ds_opts['performance']['Compute']:
522             perf_kern_comp = ds_opts['performance']['Compute']['kernel']
523         else:
524             perf_kern_comp = None
525     else:
526         perf = False
527
528     tenant_settings = ns['networks']['tenant']
529     tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
530         ns['networks']['tenant'].get('segmentation_type') == 'vlan'
531
532     # Modify OPNFV environment
533     # TODO: Change this to build a dict and output yaml rather than parsing
534     for line in fileinput.input(tmp_opnfv_env, inplace=True):
535         line = line.strip('\n')
536         output_line = line
537         if 'CloudDomain' in line:
538             output_line = "  CloudDomain: {}".format(ns['domain_name'])
539         elif 'replace_private_key' in line:
540             output_line = "    private_key: |\n"
541             key_out = ''
542             for line in private_key.splitlines():
543                 key_out += "      {}\n".format(line)
544             output_line += key_out
545         elif 'replace_public_key' in line:
546             output_line = "    public_key: '{}'".format(public_key)
547         elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
548                 'resource_registry' in line:
549             output_line = "resource_registry:\n" \
550                           "  OS::TripleO::NodeUserData: first-boot.yaml"
551         elif 'ComputeExtraConfigPre' in line and \
552                 ds_opts['dataplane'] == 'ovs_dpdk':
553             output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
554                           './ovs-dpdk-preconfig.yaml'
555         elif 'NeutronNetworkVLANRanges' in line:
556             vlan_setting = ''
557             if tenant_vlan_enabled:
558                 if ns['networks']['tenant']['overlay_id_range']:
559                     vlan_setting = ns['networks']['tenant']['overlay_id_range']
560                     if 'datacentre' not in vlan_setting:
561                         vlan_setting += ',datacentre:1:1000'
562             # SRIOV networks are VLAN based provider networks. In order to
563             # simplify the deployment, nfv_sriov will be the default physnet.
564             # VLANs are not needed in advance, and the user will have to create
565             # the network specifying the segmentation-id.
566             if ds_opts['sriov']:
567                 if vlan_setting:
568                     vlan_setting += ",nfv_sriov"
569                 else:
570                     vlan_setting = "datacentre:1:1000,nfv_sriov"
571             if vlan_setting:
572                 output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
573         elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
574             if tenant_settings['overlay_id_range']:
575                 physnets = tenant_settings['overlay_id_range'].split(',')
576                 output_line = "  NeutronBridgeMappings: "
577                 for physnet in physnets:
578                     physnet_name = physnet.split(':')[0]
579                     if physnet_name != 'datacentre':
580                         output_line += "{}:br-vlan,".format(physnet_name)
581                 output_line += "datacentre:br-ex"
582         elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
583                 and ds_opts['sdn_controller'] == 'opendaylight':
584             if tenant_settings['overlay_id_range']:
585                 physnets = tenant_settings['overlay_id_range'].split(',')
586                 output_line = "  OpenDaylightProviderMappings: "
587                 for physnet in physnets:
588                     physnet_name = physnet.split(':')[0]
589                     if physnet_name != 'datacentre':
590                         output_line += "{}:br-vlan,".format(physnet_name)
591                 output_line += "datacentre:br-ex"
592         elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
593             output_line = "  NeutronNetworkType: vlan\n" \
594                           "  NeutronTunnelTypes: ''"
595
596         if ds_opts['sdn_controller'] == 'opendaylight' and \
597                 'odl_vpp_routing_node' in ds_opts:
598             if 'opendaylight::vpp_routing_node' in line:
599                 output_line = ("    opendaylight::vpp_routing_node: {}.{}"
600                                .format(ds_opts['odl_vpp_routing_node'],
601                                        ns['domain_name']))
602         elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
603             if 'NeutronVPPAgentPhysnets' in line:
604                 # VPP interface tap0 will be used for external network
605                 # connectivity.
606                 output_line = ("  NeutronVPPAgentPhysnets: "
607                                "'datacentre:{},external:tap0'"
608                                .format(tenant_nic['Controller']))
609         elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
610                 'dvr') is True:
611             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
612                 output_line = ''
613             elif 'NeutronDhcpAgentsPerNetwork' in line:
614                 if num_compute == 0:
615                     num_dhcp_agents = num_control
616                 else:
617                     num_dhcp_agents = num_compute
618                 output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
619                                .format(num_dhcp_agents))
620             elif 'ComputeServices' in line:
621                 output_line = ("  ComputeServices:\n"
622                                "    - OS::TripleO::Services::NeutronDhcpAgent")
623
624         if perf:
625             for role in 'NovaCompute', 'Controller':
626                 if role == 'NovaCompute':
627                     perf_opts = perf_vpp_comp
628                 else:
629                     perf_opts = perf_vpp_ctrl
630                 cfg = "{}ExtraConfig".format(role)
631                 if cfg in line and perf_opts:
632                     perf_line = ''
633                     if 'main-core' in perf_opts:
634                         perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
635                                       .format(perf_opts['main-core']))
636                     if 'corelist-workers' in perf_opts:
637                         perf_line += ("\n    "
638                                       "fdio::vpp_cpu_corelist_workers: '{}'"
639                                       .format(perf_opts['corelist-workers']))
640                     if ds_opts['sdn_controller'] == 'opendaylight' and \
641                             ds_opts['dataplane'] == 'fdio':
642                         if role == 'NovaCompute':
643                             perf_line += ("\n    "
644                                           "tripleo::profile::base::neutron::"
645                                           "agents::honeycomb::"
646                                           "interface_role_mapping:"
647                                           " ['{}:tenant-interface',"
648                                           "'{}:public-interface']"
649                                           .format(tenant_nic[role],
650                                                   external_nic[role]))
651                         else:
652                             perf_line += ("\n    "
653                                           "tripleo::profile::base::neutron::"
654                                           "agents::honeycomb::"
655                                           "interface_role_mapping:"
656                                           " ['{}:tenant-interface']"
657                                           .format(tenant_nic[role]))
658                     if perf_line:
659                         output_line = ("  {}:{}".format(cfg, perf_line))
660
661             if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
662                 for k, v in OVS_PERF_MAP.items():
663                     if k in line and v in perf_ovs_comp:
664                         output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])
665
666             # kernel args
667             # (FIXME) use compute's kernel settings for all nodes for now.
668             if perf_kern_comp:
669                 if 'NovaSchedulerDefaultFilters' in line:
670                     output_line = \
671                         "  NovaSchedulerDefaultFilters: 'RamFilter," \
672                         "ComputeFilter,AvailabilityZoneFilter," \
673                         "ComputeCapabilitiesFilter," \
674                         "ImagePropertiesFilter,NUMATopologyFilter'"
675                 elif 'ComputeKernelArgs' in line:
676                     kernel_args = ''
677                     for k, v in perf_kern_comp.items():
678                         kernel_args += "{}={} ".format(k, v)
679                     if kernel_args:
680                         output_line = "  ComputeKernelArgs: '{}'".\
681                             format(kernel_args)
682
683         print(output_line)
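    # fileinput with inplace=True means each print() above rewrites the
    # matching line of the copied opnfv-environment.yaml; for example
    # (domain value assumed) a 'CloudDomain' line becomes
    # '  CloudDomain: opnfv.example.com'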
684
685     # Merge compute services into control services if this is a
686     # single node deployment
687     if num_compute == 0:
688         logging.info("All in one deployment. Checking if service merging "
689                      "required into control services")
690         with open(tmp_opnfv_env, 'r') as fh:
691             data = yaml.safe_load(fh)
692         param_data = data['parameter_defaults']
693         # Check to see if any parameters are set for Compute
694         for param in param_data.keys():
695             if param != 'ComputeServices' and param.startswith('Compute'):
696                 logging.warning("Compute parameter set, but will not be used "
697                                 "in deployment: {}. Please use Controller "
698                                 "based parameters when using All-in-one "
699                                 "deployments".format(param))
700         if ('ControllerServices' in param_data and 'ComputeServices' in
701                 param_data):
702             logging.info("Services detected in environment file. Merging...")
703             ctrl_services = param_data['ControllerServices']
704             cmp_services = param_data['ComputeServices']
705             param_data['ControllerServices'] = list(set().union(
706                 ctrl_services, cmp_services))
707             for dup_service in DUPLICATE_COMPUTE_SERVICES:
708                 if dup_service in param_data['ControllerServices']:
709                     param_data['ControllerServices'].remove(dup_service)
710             param_data.pop('ComputeServices')
711             logging.debug("Merged controller services: {}".format(
712                 pprint.pformat(param_data['ControllerServices'])
713             ))
714             with open(tmp_opnfv_env, 'w') as fh:
715                 yaml.safe_dump(data, fh, default_flow_style=False)
716         else:
717             logging.info("No services detected in env file, not merging "
718                          "services")
719
720     logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
721
722
723 def generate_ceph_key():
724     key = os.urandom(16)
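    # '<hiih' packs a cephx-style key header: type (1), creation time in
    # seconds, nanoseconds (0) and the key length, all little-endian; the
    # 16 random key bytes follow and the result is base64 encoded so it can
    # be dropped into storage-environment.yaml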
725     header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
726     return base64.b64encode(header + key)
727
728
729 def prep_storage_env(ds, ns, virtual, tmp_dir):
730     """
731     Creates storage environment file for deployment.  Source file is copied by
732     undercloud playbook to host.
733     :param ds:
734     :param ns:
735     :param virtual:
736     :param tmp_dir:
737     :return:
738     """
739     ds_opts = ds['deploy_options']
740     storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
741     if not os.path.isfile(storage_file):
742         logging.error("storage-environment file is not in tmp directory: {}. "
743                       "Check if file was copied from "
744                       "undercloud".format(tmp_dir))
745         raise ApexDeployException("storage-environment file not copied from "
746                                   "undercloud")
747     for line in fileinput.input(storage_file, inplace=True):
748         line = line.strip('\n')
749         if 'CephClusterFSID' in line:
750             print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
751         elif 'CephMonKey' in line:
752             print("  CephMonKey: {}".format(generate_ceph_key().decode(
753                 'utf-8')))
754         elif 'CephAdminKey' in line:
755             print("  CephAdminKey: {}".format(generate_ceph_key().decode(
756                 'utf-8')))
757         elif 'CephClientKey' in line:
758             print("  CephClientKey: {}".format(generate_ceph_key().decode(
759                 'utf-8')))
760         else:
761             print(line)
762
763     if ds_opts['containers']:
764         ceph_params = {}
765
766         # The maximum number of pgs allowed is calculated as num_mons *
767         # 200.  Therefore we set the number of pgs and pools so that the
768         # total (num_pgs * num_pools * num_osds) will be less than that.
769         ceph_params['CephPoolDefaultSize'] = 2
770         ceph_params['CephPoolDefaultPgNum'] = 32
771         if virtual:
772             ceph_params['CephAnsibleExtraConfig'] = {
773                 'centos_package_dependencies': [],
774                 'ceph_osd_docker_memory_limit': '1g',
775                 'ceph_mds_docker_memory_limit': '1g',
776             }
777         ceph_device = ds_opts['ceph_device']
778         ceph_params['CephAnsibleDisksConfig'] = {
779             'devices': [ceph_device],
780             'journal_size': 512,
781             'osd_scenario': 'collocated'
782         }
783         utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
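        # e.g. for a virtual deploy this injects parameter_defaults such as
        # CephPoolDefaultSize: 2, CephPoolDefaultPgNum: 32 and a
        # CephAnsibleDisksConfig entry pointing at the configured ceph_device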
784     # TODO(trozet): remove following block as we only support containers now
785     elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
786         with open(storage_file, 'a') as fh:
787             fh.write('  ExtraConfig:\n')
788             fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
789                 ds_opts['ceph_device']
790             ))
791
792
793 def prep_sriov_env(ds, tmp_dir):
794     """
795     Creates SRIOV environment file for deployment. Source file is copied by
796     undercloud playbook to host.
797     :param ds:
798     :param tmp_dir:
799     :return:
800     """
801     ds_opts = ds['deploy_options']
802     sriov_iface = ds_opts['sriov']
803     sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
804     if not os.path.isfile(sriov_file):
805         logging.error("sriov-environment file is not in tmp directory: {}. "
806                       "Check if file was copied from "
807                       "undercloud".format(tmp_dir))
808         raise ApexDeployException("sriov-environment file not copied from "
809                                   "undercloud")
810     # TODO(rnoriega): Instead of line editing, refactor this code to load
811     # yaml file into a dict, edit it and write the file back.
812     for line in fileinput.input(sriov_file, inplace=True):
813         line = line.strip('\n')
814         if 'NovaSchedulerDefaultFilters' in line:
815             print("  {}".format(line[3:]))
816         elif 'NovaSchedulerAvailableFilters' in line:
817             print("  {}".format(line[3:]))
818         elif 'NeutronPhysicalDevMappings' in line:
819             print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
820                   .format(sriov_iface))
821         elif 'NeutronSriovNumVFs' in line:
822             print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
823         elif 'NovaPCIPassthrough' in line:
824             print("  NovaPCIPassthrough:")
825         elif 'devname' in line:
826             print("    - devname: \"{}\"".format(sriov_iface))
827         elif 'physical_network' in line:
828             print("      physical_network: \"nfv_sriov\"")
829         else:
830             print(line)
831
832
833 def external_network_cmds(ns, ds):
834     """
835     Generates external network openstack commands
836     :param ns: network settings
837     :param ds: deploy settings
838     :return: list of commands to configure external network
839     """
840     ds_opts = ds['deploy_options']
841     external_physnet = 'datacentre'
842     if ds_opts['dataplane'] == 'fdio' and \
843        ds_opts['sdn_controller'] != 'opendaylight':
844         external_physnet = 'external'
845     if 'external' in ns.enabled_network_list:
846         net_config = ns['networks']['external'][0]
847         external = True
848         pool_start, pool_end = net_config['floating_ip_range']
849     else:
850         net_config = ns['networks']['admin']
851         external = False
852         pool_start, pool_end = ns['apex']['networks']['admin'][
853             'introspection_range']
854     nic_config = net_config['nic_mapping']
855     gateway = net_config['gateway']
856     cmds = list()
857     # create network command
858     if nic_config['compute']['vlan'] == 'native':
859         ext_type = 'flat'
860     else:
861         ext_type = "vlan --provider-segment {}".format(nic_config[
862                                                        'compute']['vlan'])
863     cmds.append("openstack network create external --project service "
864                 "--external --provider-network-type {} "
865                 "--provider-physical-network {}"
866                 .format(ext_type, external_physnet))
867     # create subnet command
868     cidr = net_config['cidr']
869     subnet_cmd = "openstack subnet create external-subnet --project " \
870                  "service --network external --no-dhcp --gateway {} " \
871                  "--allocation-pool start={},end={} --subnet-range " \
872                  "{}".format(gateway, pool_start, pool_end, str(cidr))
873     if external and cidr.version == 6:
874         subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
875                       '--ipv6-address-mode slaac'
876     cmds.append(subnet_cmd)
877     logging.debug("Neutron external network commands determined "
878                   "as: {}".format(cmds))
879     return cmds
880
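# Sketch of the commands external_network_cmds() produces for a flat,
# native-VLAN external network (addresses are placeholders only):
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service \
#     --network external --no-dhcp --gateway 192.0.2.1 \
#     --allocation-pool start=192.0.2.10,end=192.0.2.50 \
#     --subnet-range 192.0.2.0/24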
881
882 def create_congress_cmds(overcloud_file):
883     drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
884     overcloudrc = parsers.parse_overcloudrc(overcloud_file)
885     logging.info("Creating congress commands")
886     try:
887         ds_cfg = [
888             "username={}".format(overcloudrc['OS_USERNAME']),
889             "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
890             "password={}".format(overcloudrc['OS_PASSWORD']),
891             "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
892         ]
893     except KeyError:
894         logging.error("Unable to find all keys required for congress in "
895                       "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
896                       "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
897                       "file: {}".format(overcloud_file))
898         raise
899     cmds = list()
900     ds_cfg = '--config ' + ' --config '.join(ds_cfg)
901
902     for driver in drivers:
903         if driver == 'doctor':
904             cmd = "{} \"{}\"".format(driver, driver)
905         else:
906             cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
907         if driver == 'nova':
908             cmd += ' --config api_version="2.34"'
909         logging.debug("Congress command created: {}".format(cmd))
910         cmds.append(cmd)
911     return cmds
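# Sketch of a command string create_congress_cmds() yields for the nova
# driver (credential values are placeholders):
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=secret --config auth_url=http://192.0.2.1:5000/v3 \
#     --config api_version="2.34"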