Increasing loop device size for baremetal deployments
[apex.git] / apex / overcloud / deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import shutil
import struct
import time
import uuid
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOOP_DEVICE_SIZE = "10G"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  The SDN controller is
    matched first, and then the function looks for enabled features of that
    controller to determine which environment files should be used.  By
    default a feature is added to the list if it is set to true in the
    deploy settings.  If a feature's value is a tuple rather than a boolean,
    it is treated as a (key, value) pair to compare against the deploy
    settings.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
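
    Example (illustrative settings): with ds = {'sdn_controller':
    'opendaylight', 'sfc': True} and this module's SDN_FILE_MAP, the
    result contains the THT_ENV_DIR paths for neutron-opendaylight.yaml
    (the controller default, appended first) followed by
    neutron-sfc-opendaylight.yaml (the enabled feature).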
111     """
112     if env_list is None:
113         env_list = list()
114     for k, v in sdn_map.items():
115         if ds['sdn_controller'] == k or (k in ds and ds[k]):
116             if isinstance(v, dict):
117                 # Append default SDN env file first
118                 # The assumption is that feature-enabled SDN env files
119                 # override and do not conflict with previously set default
120                 # settings
121                 if ds['sdn_controller'] == k and 'default' in v:
122                     env_list.append(os.path.join(con.THT_ENV_DIR,
123                                                  v['default']))
124                 env_list.extend(build_sdn_env_list(ds, v))
125             # check if the value is not a boolean
126             elif isinstance(v, tuple):
127                     if ds[k] == v[0]:
128                         env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
129             else:
130                 env_list.append(os.path.join(con.THT_ENV_DIR, v))
131     if len(env_list) == 0:
132         try:
133             env_list.append(os.path.join(
134                 con.THT_ENV_DIR, sdn_map['default']))
135         except KeyError:
136             logging.warning("Unable to find default file for SDN")
137
138     return env_list
139


def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(tht_dir,
                                    docker_services[sdn_base])
            else:
                return os.path.join(tht_dir, sdn_base)


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    # Check for 'k8s' here intentionally, as we may support other values
    # such as openstack/openshift for 'vim' option.
    if ds_opts['vim'] == 'k8s':
        deploy_options.append('kubernetes-environment.yaml')

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')
    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
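
# A hypothetical virtual, non-HA deploy (values illustrative) yields a
# command along these lines:
#   openstack overcloud deploy --templates --timeout 90 \
#     -e network-environment.yaml -e opnfv-environment.yaml \
#     -e virtual-environment.yaml ... --ntp-server <ntp> \
#     --control-scale 1 --compute-scale 1 \
#     --control-flavor control --compute-flavor compute --libvirt-type qemu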


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :return: set of patched container names
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Disabling neutron openvswitch-agent in image")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start at boot")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    # needed both for ODL injection and for the patching logic below,
    # regardless of which SDN controller is deployed
    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']

    if sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if deploying containers with Ceph and no dedicated Ceph device, we
    # need a persistent loop device for the Ceph OSDs
    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
                tmp_losetup)},
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    # TODO(trozet) remove this after LP#173474 is fixed
    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
    virt_cmds.append(
        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
                           "ConditionPathExists".format(dhcp_unit)})
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a private/public ssh key pair using 1024-bit RSA
    :return: private key string, public key string
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
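
# Example usage (illustrative): prep_env() below injects the generated pair
# into the opnfv environment file:
#   private_key, public_key = make_ssh_key()
#   # private_key is PKCS8 PEM ('-----BEGIN PRIVATE KEY-----...')
#   # public_key is OpenSSH format ('ssh-rsa AAAA...')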


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return:
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Convenience variables for the file edits below
    if 'performance' in ds_opts:
        perf = True
        perf_vpp_comp = ds_opts['performance']['Compute'].get('vpp')
        perf_vpp_ctrl = ds_opts['performance']['Controller'].get('vpp')
        perf_ovs_comp = ds_opts['performance']['Compute'].get('ovs')
        perf_kern_comp = ds_opts['performance']['Compute'].get('kernel')
    else:
        perf = False
        perf_vpp_comp = perf_vpp_ctrl = perf_ovs_comp = perf_kern_comp = None

    tenant_settings = ns['networks']['tenant']
    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
        tenant_settings.get('segmentation_type') == 'vlan'

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputting yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'
        elif 'NeutronNetworkVLANRanges' in line:
            vlan_setting = ''
            if tenant_vlan_enabled:
                if ns['networks']['tenant']['overlay_id_range']:
                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
                    if 'datacentre' not in vlan_setting:
                        vlan_setting += ',datacentre:1:1000'
            # SRIOV networks are VLAN based provider networks. In order to
            # simplify the deployment, nfv_sriov will be the default physnet.
            # VLANs are not needed in advance, and the user will have to create
            # the network specifying the segmentation-id.
            if ds_opts['sriov']:
                if vlan_setting:
                    vlan_setting += ",nfv_sriov"
                else:
                    vlan_setting = "datacentre:1:1000,nfv_sriov"
            if vlan_setting:
                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  NeutronBridgeMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
                and ds_opts['sdn_controller'] == 'opendaylight':
            if tenant_settings['overlay_id_range']:
                physnets = tenant_settings['overlay_id_range'].split(',')
                output_line = "  OpenDaylightProviderMappings: "
                for physnet in physnets:
                    physnet_name = physnet.split(':')[0]
                    if physnet_name != 'datacentre':
                        output_line += "{}:br-vlan,".format(physnet_name)
                output_line += "datacentre:br-ex"
        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
            output_line = "  NeutronNetworkType: vlan\n" \
                          "  NeutronTunnelTypes: ''"

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")

        if perf:
            for role in ('NovaCompute', 'Controller'):
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = "  {}:{}".format(cfg, perf_line)

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".format(
                            kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
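
# The packed header appears to follow the CephX secret layout (an assumption
# based on the '<hiih' format string): little-endian int16 key type (1),
# int32 creation time in seconds, int32 nanoseconds (0) and int16 secret
# length. Keys built this way base64-encode to the familiar 'AQ...' form
# seen in Ceph keyrings.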


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied
    by undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: True if deployment is virtual
    :param tmp_dir: Apex tmp dir
    :return:
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        ceph_params = {}

        # The maximum number of PGs allowed is calculated as num_mons * 200,
        # so set the PG count and pool size such that the total
        # (num_pgs * num_pools * num_osds) stays below that limit
        ceph_params['CephPoolDefaultSize'] = 2
        ceph_params['CephPoolDefaultPgNum'] = 32
        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
        ceph_device = ds_opts['ceph_device']
        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    # TODO(trozet): remove following block as we only support containers now
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
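
# For a containerized virtual deploy, the resulting parameter_defaults
# contain roughly the following (assuming ceph_device is /dev/loop3):
#   CephPoolDefaultSize: 2
#   CephPoolDefaultPgNum: 32
#   CephAnsibleExtraConfig: {'centos_package_dependencies': [],
#                            'ceph_osd_docker_memory_limit': '1g', ...}
#   CephAnsibleDisksConfig: {'devices': ['/dev/loop3'],
#                            'journal_size': 512,
#                            'osd_scenario': 'collocated'}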


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return:
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)

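# For example (illustrative interface name), with the sriov option set to
# 'ens3' the edited file ends up with lines such as:
#   NeutronPhysicalDevMappings: "nfv_sriov:ens3"
#   NeutronSriovNumVFs: "ens3:8"
#   NovaPCIPassthrough:
#     - devname: "ens3"
#       physical_network: "nfv_sriov"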

def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
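
# With an external network on a native VLAN (addresses illustrative), the
# generated commands look like:
#   openstack network create external --project service --external \
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service --network \
#     external --no-dhcp --gateway 192.0.2.1 \
#     --allocation-pool start=192.0.2.50,end=192.0.2.99 \
#     --subnet-range 192.0.2.0/24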


def create_congress_cmds(overcloud_file):
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
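
# Each returned entry is an argument string for congress datasource
# creation, e.g. (credentials illustrative):
#   nova "nova" --config username=admin --config tenant_name=admin
#       --config password=secret --config auth_url=http://<vip>:5000
#       --config api_version="2.34"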