apex/overcloud/deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import pprint
import shutil
import uuid
import struct
import time
import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


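# Maps an SDN controller (or False for no SDN controller) and its enabled
# features to the THT environment files that enable them.  Tuple values are
# matched as a (deploy setting value, env file) pair in build_sdn_env_list().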
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

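# Maps OVS-DPDK THT parameter names to the matching keys under the Compute
# role's 'ovs' performance options in deploy settings.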
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

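# systemd unit installed into the overcloud image by prep_image() when Ceph
# OSDs have to live on a loop device; it recreates /dev/loop3 from
# /srv/data.img at boot.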
LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""

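# Compute-role services that are dropped again after compute services are
# merged into ControllerServices for all-in-one deployments (see prep_env),
# since they would duplicate the controller's own neutron services.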
DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches the sdn_map.  First the SDN controller
    is matched, and then the function looks for enabled features of that
    controller to determine which environment files should be used.  By
    default, a feature is added to the list if it is set to true in the
    deploy settings.  If a feature does not have a boolean value, then the
    key and value pair to compare with are checked as a tuple (k, v).

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def get_docker_sdn_files(ds_opts):
    """
    Returns docker env files for detected SDN
    :param ds_opts: deploy options
    :return: list of docker THT env files for an SDN
    """
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
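    # Swap each env file for its containerized equivalent under the docker
    # THT dir when one is defined; otherwise reuse the same file name there.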
    for i, sdn_file in enumerate(sdn_env_list):
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                sdn_env_list[i] = \
                    os.path.join(tht_dir, docker_services[sdn_base])
            else:
                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
    return sdn_env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_files = get_docker_sdn_files(ds_opts)
        for sdn_docker_file in sdn_docker_files:
            deploy_options.append(sdn_docker_file)
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    # TODO(trozet): Fix this check to look for whether ceph is in the
    # controller services rather than relying on the env file name
    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for the 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

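    # Non-HA deployments are scaled down to a single controller even if the
    # inventory defines more control nodes.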
    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing within the default 90 minutes.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS.  Either way it will probably improve as aarch64 support
        # matures in CentOS, so deploy time should be re-tested in the future
        # and this multiplier removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
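    # Virtual deploys fall back to plain qemu emulation when nested KVM is
    # not available; virtual aarch64 deploys always use qemu.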
    libvirt_type = 'kvm'
    if virtual and (platform.machine() != 'aarch64'):
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    elif virtual and (platform.machine() == 'aarch64'):
        libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    if platform.machine() == 'aarch64':
        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize the logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
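    # virt_cmds queues up image customization commands; they are applied to
    # the copied overcloud image in a single virt_utils.virt_customize() call
    # at the end of this function.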
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    if ds_opts['vpn']:
        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD startup script configured in overcloud image")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})

    if dataplane == 'ovs':
        # FIXME(trozet) remove this after RDO is updated with fix for
        # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
        # https://review.rdoproject.org/r/#/c/13839/
        oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)

    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # When deploying containers with Ceph and no dedicated Ceph device, a
    # persistent loop device is needed for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    # Prep for NFS
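    # Install nfs-utils and export /glance, /cinder and /nova from the
    # overcloud image so they can serve as shared NFS backends for those
    # services.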
    virt_cmds.extend([
        {con.VIRT_INSTALL: "nfs-utils"},
        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
                           "/etc/systemd/system/multi-user.target.wants/"
                           "nfs-server.service"},
        {con.VIRT_RUN_CMD: "mkdir -p /glance"},
        {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
        {con.VIRT_RUN_CMD: "mkdir -p /nova"},
        {con.VIRT_RUN_CMD: "chmod 777 /glance"},
        {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
        {con.VIRT_RUN_CMD: "chmod 777 /nova"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
        {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
        {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
                           "no_root_squash,no_acl)' > /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
                           "no_root_squash,no_acl)' >> /etc/exports"},
        {con.VIRT_RUN_CMD: "exportfs -avr"},
    ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a 1024-bit RSA ssh key pair
    :return: private, public key
    """
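    # NOTE: the generated pair is substituted into the opnfv environment file
    # by prep_env() via the replace_private_key/replace_public_key markers.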
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    num_control, num_compute = inv.get_node_counts()
    if num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to building a dict and outputting yaml rather than parsing
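    # fileinput with inplace=True redirects stdout into the file being read,
    # so each print(output_line) below rewrites that line of the env file.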
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                if num_compute == 0:
                    num_dhcp_agents = num_control
                else:
                    num_dhcp_agents = num_compute
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_dhcp_agents))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    # Merge compute services into control services if this is a single
    # node deployment
    if num_compute == 0:
        logging.info("All in one deployment. Checking if service merging "
                     "required into control services")
        with open(tmp_opnfv_env, 'r') as fh:
            data = yaml.safe_load(fh)
        param_data = data['parameter_defaults']
        # Check to see if any parameters are set for Compute
        for param in param_data.keys():
            if param != 'ComputeServices' and param.startswith('Compute'):
                logging.warning("Compute parameter set, but will not be used "
                                "in deployment: {}. Please use Controller "
                                "based parameters when using All-in-one "
                                "deployments".format(param))
        if ('ControllerServices' in param_data and 'ComputeServices' in
                param_data):
            logging.info("Services detected in environment file. Merging...")
            ctrl_services = param_data['ControllerServices']
            cmp_services = param_data['ComputeServices']
            param_data['ControllerServices'] = list(set().union(
                ctrl_services, cmp_services))
            for dup_service in DUPLICATE_COMPUTE_SERVICES:
                if dup_service in param_data['ControllerServices']:
                    param_data['ControllerServices'].remove(dup_service)
            param_data.pop('ComputeServices')
            logging.debug("Merged controller services: {}".format(
                pprint.pformat(param_data['ControllerServices'])
            ))
            with open(tmp_opnfv_env, 'w') as fh:
                yaml.safe_dump(data, fh, default_flow_style=False)
        else:
            logging.info("No services detected in env file, not merging "
                         "services")

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
    with open(tmp_opnfv_env, 'r') as fh:
        logging.debug("opnfv-environment content is : {}".format(
            pprint.pformat(yaml.safe_load(fh.read()))
        ))


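# Builds a cephx-style secret: a little-endian header of (key type=1,
# creation time in seconds, 0 nanoseconds, key length) followed by 16 random
# bytes, all base64 encoded.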
def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

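    # Containerized deployments manage Ceph via ceph-ansible, so the
    # parameters below are merged into the storage env's parameter_defaults.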
    if ds_opts['containers']:
        ceph_params = {}

        # The maximum number of PGs allowed is calculated as num_mons * 200,
        # so set the default pool size and PG number such that the total
        # (num_pgs * num_pools * num_osds) stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
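    # The NovaScheduler* parameters are expected to arrive commented out in
    # the source template; printing line[3:] strips the leading '#  ' so they
    # take effect.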
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
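    # nosdn fdio scenarios map the external network to VPP's 'external'
    # physnet (see NeutronVPPAgentPhysnets in prep_env) rather than the
    # default 'datacentre'.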
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
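    # One datasource command is built per driver below; nova additionally
    # pins its API version.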
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds