Add tenant vlan support
[apex.git] / apex / overcloud / deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import shutil
import uuid
import struct
import time
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches the sdn_map.  The SDN controller is
    matched first, and then the function looks for enabled features for that
    controller to determine which environment files should be used.  By
    default a feature is added to the list if it is set to True in the deploy
    settings.  If a feature's value is not a boolean, it is treated as a
    (key, value) tuple and compared against the deploy settings.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


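# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): how build_sdn_env_list()
# resolves environment files for a hypothetical deploy settings dict.  The
# keys shown ('sdn_controller', 'sfc', 'vpn', 'dataplane') mirror the ones
# checked above; real Apex deploy settings carry many more options.
#
#   ds = {'sdn_controller': 'opendaylight', 'sfc': True,
#         'vpn': False, 'dataplane': 'ovs'}
#   build_sdn_env_list(ds, SDN_FILE_MAP)
#   # -> [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#   #     <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]
# ---------------------------------------------------------------------------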
def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(tht_dir,
                                    docker_services[sdn_base])
            else:
                return os.path.join(tht_dir, sdn_base)


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control, num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # It is unclear whether this is related to the hardware the OOO
        # support was developed on or to the virtualization support in
        # CentOS.  Either way it will probably improve over time as the
        # aarch64 support matures in CentOS, and deploy time should be
        # re-tested in the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


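# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the kind of command string
# create_deploy_cmd() produces for a virtual, non-HA, non-containerized
# OpenDaylight deploy.  The NTP server, timeout and <THT_ENV_DIR> path are
# placeholders; the actual list of -e files depends on the deploy settings.
#
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml \
#     -e <THT_ENV_DIR>/neutron-opendaylight.yaml \
#     -e opnfv-environment.yaml -e virtual-environment.yaml \
#     --ntp-server pool.ntp.org --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute --libvirt-type qemu
# ---------------------------------------------------------------------------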
def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD process started")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']
    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # If deploying containers with Ceph and no Ceph device is provided, we
    # need to use a persistent loop device for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a public/private SSH key pair using 1024-bit RSA
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')


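# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the key pair returned by
# make_ssh_key() is plain text, so it can be written out or injected into a
# heat environment directly.  The file names below are placeholders.
#
#   private_key, public_key = make_ssh_key()
#   with open('id_rsa', 'w') as priv, open('id_rsa.pub', 'w') as pub:
#       priv.write(private_key)
#       pub.write(public_key)
# ---------------------------------------------------------------------------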
def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        ns['networks']['tenant'].get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to building a dict and outputting YAML rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for line in private_key.splitlines():
                key_out += "      {}\n".format(line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


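# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): with the tenant network set
# to VLAN segmentation and a hypothetical overlay_id_range of
# 'datacentre:400:500', the loop above would rewrite the relevant
# opnfv-environment entries roughly as:
#
#   NeutronNetworkVLANRanges: datacentre:400:500
#   NeutronBridgeMappings: datacentre:br-ex
#   NeutronNetworkType: vlan
#   NeutronTunnelTypes: ''
#
# A non-datacentre physnet in overlay_id_range (e.g. 'vlan_net:400:500')
# would additionally be mapped to br-vlan in NeutronBridgeMappings and, for
# OpenDaylight, in OpenDaylightProviderMappings.
# ---------------------------------------------------------------------------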
def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


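# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): generate_ceph_key() packs a
# little-endian header of (type=1, created-seconds, created-nanoseconds=0,
# key length) in front of 16 random bytes and base64 encodes the result,
# which matches the binary layout used for cephx keys.  Decoding one back:
#
#   raw = base64.b64decode(generate_ceph_key())
#   key_type, secs, nsecs, length = struct.unpack('<hiih', raw[:12])
#   secret = raw[12:12 + length]
# ---------------------------------------------------------------------------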
def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param ns:
    :param virtual:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
                       "{}-centos-7".format(undercloud_admin_ip,
                                            ceph_version)
        ceph_params = {
            'DockerCephDaemonImage': docker_image,
        }

        # The maximum number of PGs allowed is calculated as num_mons * 200,
        # so set the number of PGs and pools such that the total
        # (num_pgs * num_pools * num_osds) stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


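# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): with the defaults set above
# and a hypothetical single-mon, single-OSD virtual deploy, the PG budget
# works out as:
#
#   max PGs  = num_mons * 200            (1 mon  -> 200)
#   PGs used = num_pgs * num_pools * num_osds
#            = 32 * num_pools * 1        (stays below 200 for up to 6 pools)
#
# The rendered parameter_defaults block would then carry entries such as
# CephPoolDefaultSize: 2 and CephPoolDefaultPgNum: 32 alongside the
# CephAnsibleDisksConfig set above.
# ---------------------------------------------------------------------------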
def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds:
    :param tmp_dir:
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)


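# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): with a hypothetical SR-IOV
# interface of 'ens4f0' in the deploy settings, the edited
# neutron-opendaylight-sriov.yaml would carry values such as:
#
#   NeutronPhysicalDevMappings: "nfv_sriov:ens4f0"
#   NeutronSriovNumVFs: "ens4f0:8"
#   NovaPCIPassthrough:
#     - devname: "ens4f0"
#       physical_network: "nfv_sriov"
# ---------------------------------------------------------------------------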
def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


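# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): for a hypothetical external
# network on a native VLAN with a 172.30.9.0/24 range, external_network_cmds()
# would return commands along the lines of:
#
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service \
#     --network external --no-dhcp --gateway 172.30.9.1 \
#     --allocation-pool start=172.30.9.100,end=172.30.9.200 \
#     --subnet-range 172.30.9.0/24
# ---------------------------------------------------------------------------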
def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
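# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): each entry returned by
# create_congress_cmds() is the argument string for one Congress datasource
# create call.  With hypothetical overcloudrc values, the nova entry would
# look roughly like:
#
#   nova "nova" --config username=admin --config tenant_name=admin \
#     --config password=secret --config auth_url=http://10.0.0.1:5000/v3 \
#     --config api_version="2.34"
# ---------------------------------------------------------------------------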