Enable CSIT deployments to use NFS backend
apex.git: apex/overcloud/deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}
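# Maps TripleO heat parameter names to the matching keys under the deploy
# settings' Compute OVS performance options (see prep_env below); e.g. a
# deploy_options['performance']['Compute']['ovs']['pmd_cores'] value is
# rendered as the NeutronDpdkCoreList parameter.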

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]
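# Compute-role Neutron services that would duplicate their controller
# counterparts; prep_env() strips these out when ComputeServices are merged
# into ControllerServices for all-in-one (no compute node) deployments.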


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the SDN controller
    is matched, then the function looks for enabled features for that
    controller to determine which environment files should be used.  By
    default a feature is added to the list when its key is set to true in the
    deploy settings.  If a feature does not have a boolean value, then the
    key and value pair to compare against is given as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
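    # Illustrative example (paths depend on con.THT_ENV_DIR and the deploy
    # settings in use): with 'opendaylight' as the SDN controller and 'sfc'
    # enabled, the recursion would yield roughly:
    #   [<THT_ENV_DIR>/neutron-opendaylight.yaml,
    #    <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]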
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_files(ds_opts):
    """
    Returns docker THT env files for the detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
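    # Any puppet-based SDN env file that has a containerized equivalent in
    # con.VALID_DOCKER_SERVICES is swapped for the docker THT version here,
    # e.g. neutron-opendaylight.yaml resolves to the same file name under
    # the docker env dir (or a renamed file if the mapping provides one).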
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
        if sdn_docker_files:
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet) Fix this check to look at whether ceph is in the
    # controller services rather than relying on the env file name
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS.  Either way it will probably improve over time as aarch64
        # support matures in CentOS, so deploy time should be re-tested in
        # the future and this multiplier removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))
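    # For illustration only, the generated command typically looks like:
    #   openstack overcloud deploy --templates --timeout 90 \
    #     -e network-environment.yaml -e <THT env files...> \
    #     --ntp-server <ntp> --control-scale 1 --compute-scale 1 \
    #     --control-flavor control --compute-flavor compute \
    #     --libvirt-type qemu
    # with the exact -e files, timeout, scales and libvirt type taken from
    # the deploy/network settings and inventory above.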

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD startup script added to rc.local")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        if ds_opts['sfc']:
            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
        elif sdn == 'opendaylight':
            # FIXME(trozet) remove this after RDO is updated with fix for
            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
            ovs_file = os.path.basename(con.CUSTOM_OVS)
            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
                                            targets=[ovs_file])
            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
                                                                  ovs_file))},
                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
                    ovs_file)}
            ])

    if dataplane == 'fdio':
        # Patch neutron to use an OVS external interface for the router
        # and add the generic Linux namespace interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # when deploying containers with Ceph and no dedicated Ceph device,
    # back the Ceph OSDs with a persistent loop device
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    # Prep for NFS
    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/nova"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/root/nfs/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
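    # The commands above install nfs-utils, enable nfs-server at boot, and
    # leave /etc/exports on the image reading:
    #   /root/nfs/glance *(rw,sync,no_root_squash,no_acl)
    #   /root/nfs/cinder *(rw,sync,no_root_squash,no_acl)
    #   /root/nfs/nova *(rw,sync,no_root_squash,no_acl)
    # so glance, cinder and nova can use NFS shares exported from the
    # controller as their backend (e.g. for CSIT deployments).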
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a public/private SSH key pair using 1024-bit RSA
    :return: private key, public key
    """
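    # Typical use (see prep_env below): the PEM private key is indented into
    # the opnfv environment under 'private_key' and the OpenSSH public key
    # is written under 'public_key'.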
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Precompute values referenced repeatedly by the file-editing loop below
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change this to build a dict and output YAML rather than editing
    # the file line by line
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
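        # Note: with inplace=True, fileinput redirects stdout into the file,
        # so the print(output_line) at the end of this loop is what rewrites
        # each line of the copied environment file.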
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into controller services for single-node
    # (all-in-one) deployments
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


def generate_ceph_key():
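    # Produces a Ceph-style secret: a little-endian header (key type 1,
    # creation time in seconds, 0 nanoseconds, key length) followed by
    # 16 random bytes, base64 encoded -- the layout cephx keys use.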
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum PGs allowed is calculated as num_mons * 200, so set
        # the replica size and PG count low enough that
        # num_pgs * num_pools * num_osds stays below that limit.
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
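        # Rough illustration of the sizing above: at 32 PGs per pool with a
        # replica size of 2, a handful (say 6) of default OpenStack pools
        # comes to roughly 32 * 6 * 2 = 384 PG replicas, which is intended
        # to keep the small virtual Ceph clusters Apex deploys under the
        # monitor's PG limit.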
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
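    # For example, assuming the template ships the scheduler filter settings
    # commented out as '  #NovaSchedulerDefaultFilters: ...', stripping the
    # first three characters below re-emits them as active
    # '  NovaSchedulerDefaultFilters: ...' entries, while the remaining
    # branches fill in the nfv_sriov physnet and the SRIOV interface.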
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
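    # Illustrative output for a flat external network (addresses here are
    # placeholders; real values come from the network settings):
    #   openstack network create external --project service --external \
    #     --provider-network-type flat --provider-physical-network datacentre
    #   openstack subnet create external-subnet --project service \
    #     --network external --no-dhcp --gateway 192.0.2.1 \
    #     --allocation-pool start=192.0.2.100,end=192.0.2.200 \
    #     --subnet-range 192.0.2.0/24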
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)
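    # After the join, ds_cfg is a single string along the lines of:
    #   --config username=<user> --config tenant_name=<project>
    #   --config password=<pw> --config auth_url=<keystone url>
    # which is appended to each congress datasource command built below.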

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds