Deprecates the use of '--upstream' argument
apex.git: apex/overcloud/deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import platform
15 import shutil
16 import uuid
17 import struct
18 import time
19 import apex.builders.overcloud_builder as oc_builder
20 import apex.builders.common_builder as c_builder
21
22 from apex.common import constants as con
23 from apex.common.exceptions import ApexDeployException
24 from apex.common import parsers
25 from apex.common import utils
26 from apex.virtual import utils as virt_utils
27 from cryptography.hazmat.primitives import serialization as \
28     crypto_serialization
29 from cryptography.hazmat.primitives.asymmetric import rsa
30 from cryptography.hazmat.backends import default_backend as \
31     crypto_default_backend
32
33
34 SDN_FILE_MAP = {
35     'opendaylight': {
36         'sfc': 'neutron-sfc-opendaylight.yaml',
37         'vpn': 'neutron-bgpvpn-opendaylight.yaml',
38         'gluon': 'gluon.yaml',
39         'vpp': {
40             'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
41             'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
42             'default': 'neutron-opendaylight-honeycomb.yaml'
43         },
44         'l2gw': 'neutron-l2gw-opendaylight.yaml',
45         'sriov': 'neutron-opendaylight-sriov.yaml',
46         'default': 'neutron-opendaylight.yaml',
47     },
48     'onos': {
49         'sfc': 'neutron-onos-sfc.yaml',
50         'default': 'neutron-onos.yaml'
51     },
52     'ovn': 'neutron-ml2-ovn.yaml',
53     False: {
54         'vpp': 'neutron-ml2-vpp.yaml',
55         'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
56     }
57 }
58
59 OTHER_FILE_MAP = {
60     'tacker': 'enable_tacker.yaml',
61     'congress': 'enable_congress.yaml',
62     'barometer': 'enable_barometer.yaml',
63     'rt_kvm': 'enable_rt_kvm.yaml'
64 }
65
66 OVS_PERF_MAP = {
67     'HostCpusList': 'dpdk_cores',
68     'NeutronDpdkCoreList': 'pmd_cores',
69     'NeutronDpdkSocketMemory': 'socket_memory',
70     'NeutronDpdkMemoryChannels': 'memory_channels'
71 }
72
73 OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
74 OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
75 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
76                       ".noarch.rpm"
77
78 LOSETUP_SERVICE = """[Unit]
79 Description=Setup loop devices
80 Before=network.target
81
82 [Service]
83 Type=oneshot
84 ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
85 ExecStop=/sbin/losetup -d /dev/loop3
86 TimeoutSec=60
87 RemainAfterExit=yes
88
89 [Install]
90 WantedBy=multi-user.target
91 """
92
93
94 def build_sdn_env_list(ds, sdn_map, env_list=None):
95     """
96     Builds a list of SDN environment files to be used in the deploy cmd.
97
98     This function recursively searches an SDN map.  The SDN controller is
99     matched first, and then the function looks for enabled features for that
100     controller to determine which environment files should be used.  A
101     feature is added to the list when it is set to true in the deploy
102     settings.  A tuple value is treated as (expected value, env file), and
103     the env file is added when the deploy setting matches the expected value.
104
105     :param ds: deploy settings
106     :param sdn_map: SDN map to recursively search
107     :param env_list: recursive var to hold previously found env_list
108     :return: A list of env files
109     """
110     if env_list is None:
111         env_list = list()
112     for k, v in sdn_map.items():
113         if ds['sdn_controller'] == k or (k in ds and ds[k]):
114             if isinstance(v, dict):
115                 # Append default SDN env file first
116                 # The assumption is that feature-enabled SDN env files
117                 # override and do not conflict with previously set default
118                 # settings
119                 if ds['sdn_controller'] == k and 'default' in v:
120                     env_list.append(os.path.join(con.THT_ENV_DIR,
121                                                  v['default']))
122                 env_list.extend(build_sdn_env_list(ds, v))
123             # check if the value is not a boolean
124             elif isinstance(v, tuple):
125                 if ds[k] == v[0]:
126                     env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
127             else:
128                 env_list.append(os.path.join(con.THT_ENV_DIR, v))
129     if len(env_list) == 0:
130         try:
131             env_list.append(os.path.join(
132                 con.THT_ENV_DIR, sdn_map['default']))
133         except KeyError:
134             logging.warning("Unable to find default file for SDN")
135
136     return env_list
137
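# A minimal sketch of how the recursive lookup above resolves, assuming a
# hypothetical deploy options dict:
#
#   opts = {'sdn_controller': 'opendaylight', 'sfc': True, 'vpn': False}
#   build_sdn_env_list(opts, SDN_FILE_MAP)
#   # -> [os.path.join(con.THT_ENV_DIR, 'neutron-opendaylight.yaml'),
#   #     os.path.join(con.THT_ENV_DIR, 'neutron-sfc-opendaylight.yaml')]
#
# The controller's 'default' file is appended first, followed by one file
# per enabled feature found while recursing into the controller's sub-map.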
138
139 def get_docker_sdn_file(ds_opts):
140     """
141     Returns docker env file for detected SDN
142     :param ds_opts: deploy options
143     :return: docker THT env file for an SDN
144     """
145     # FIXME(trozet): We assume right now there is only one docker SDN file
146     docker_services = con.VALID_DOCKER_SERVICES
147     tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
148     sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
149     for sdn_file in sdn_env_list:
150         sdn_base = os.path.basename(sdn_file)
151         if sdn_base in docker_services:
152             if docker_services[sdn_base] is not None:
153                 return os.path.join(tht_dir,
154                                     docker_services[sdn_base])
155             else:
156                 return os.path.join(tht_dir, sdn_base)
157
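# A minimal sketch of the resolution above, assuming the detected SDN env
# file's base name appears in con.VALID_DOCKER_SERVICES ('queens' is a
# hypothetical os_version key):
#
#   opts = {'sdn_controller': 'opendaylight', 'os_version': 'queens'}
#   get_docker_sdn_file(opts)
#   # -> the docker THT dir for 'queens' joined with either the alternate
#   #    file name registered for 'neutron-opendaylight.yaml' in
#   #    VALID_DOCKER_SERVICES, or that base name itself when no alternate
#   #    is registered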
158
159 def create_deploy_cmd(ds, ns, inv, tmp_dir,
160                       virtual, env_file='opnfv-environment.yaml',
161                       net_data=False):
162
163     logging.info("Creating deployment command")
164     deploy_options = ['network-environment.yaml']
165
166     ds_opts = ds['deploy_options']
167
168     if ds_opts['containers']:
169         deploy_options.append(os.path.join(con.THT_ENV_DIR,
170                                            'docker.yaml'))
171
172     if ds['global_params']['ha_enabled']:
173         if ds_opts['containers']:
174             deploy_options.append(os.path.join(con.THT_ENV_DIR,
175                                                'docker-ha.yaml'))
176         else:
177             deploy_options.append(os.path.join(con.THT_ENV_DIR,
178                                                'puppet-pacemaker.yaml'))
179
180     if env_file:
181         deploy_options.append(env_file)
182
183     if ds_opts['containers']:
184         deploy_options.append('docker-images.yaml')
185         sdn_docker_file = get_docker_sdn_file(ds_opts)
186         if sdn_docker_file:
187             deploy_options.append(sdn_docker_file)
188             deploy_options.append('sdn-images.yaml')
189     else:
190         deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
191
192     for k, v in OTHER_FILE_MAP.items():
193         if k in ds_opts and ds_opts[k]:
194             if ds_opts['containers']:
195                 deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR[
196                     ds_opts['os_version']], "{}.yaml".format(k)))
197             else:
198                 deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
199
200     if ds_opts['ceph'] and 'csit' not in env_file:
201         prep_storage_env(ds, ns, virtual, tmp_dir)
202         deploy_options.append(os.path.join(con.THT_ENV_DIR,
203                                            'storage-environment.yaml'))
204     if ds_opts['sriov']:
205         prep_sriov_env(ds, tmp_dir)
206
207     if virtual:
208         deploy_options.append('virtual-environment.yaml')
209     else:
210         deploy_options.append('baremetal-environment.yaml')
211
212     num_control, num_compute = inv.get_node_counts()
213     if num_control == 0 or num_compute == 0:
214         logging.error("Detected 0 control or compute nodes.  Control nodes: "
215                       "{}, compute nodes: {}".format(num_control, num_compute))
216         raise ApexDeployException("Invalid number of control or computes")
217     elif num_control > 1 and not ds['global_params']['ha_enabled']:
218         num_control = 1
219     if platform.machine() == 'aarch64':
220         # aarch64 deploys were not completing in the default 90 minutes.
221         # It is unclear whether this is related to the hardware the OOO
222         # support was developed on or to the virtualization support in
223         # CentOS.  Either way it will probably improve over time as aarch64
224         # support matures in CentOS, so deploy times should be re-tested in
225         # the future and this multiplier removed when no longer needed.
226         con.DEPLOY_TIMEOUT *= 2
227     cmd = "openstack overcloud deploy --templates --timeout {} " \
228           .format(con.DEPLOY_TIMEOUT)
229     # build cmd env args
230     for option in deploy_options:
231         cmd += " -e {}".format(option)
232     cmd += " --ntp-server {}".format(ns['ntp'][0])
233     cmd += " --control-scale {}".format(num_control)
234     cmd += " --compute-scale {}".format(num_compute)
235     cmd += ' --control-flavor control --compute-flavor compute'
236     if net_data:
237         cmd += ' --networks-file network_data.yaml'
238     libvirt_type = 'kvm'
239     if virtual:
240         with open('/sys/module/kvm_intel/parameters/nested') as f:
241             nested_kvm = f.read().strip()
242             if nested_kvm != 'Y':
243                 libvirt_type = 'qemu'
244     cmd += ' --libvirt-type {}'.format(libvirt_type)
245     logging.info("Deploy command set: {}".format(cmd))
246
247     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
248         fh.write(cmd)
249     return cmd
250
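# A rough sketch of the command string assembled above for a virtual,
# containerized HA deploy (env file list abbreviated; NTP server and node
# counts hypothetical; --timeout shown with the 90 minute default noted
# above).  The real string is built on a single line:
#
#   openstack overcloud deploy --templates --timeout 90
#       -e network-environment.yaml -e <THT_ENV_DIR>/docker.yaml
#       -e <THT_ENV_DIR>/docker-ha.yaml -e opnfv-environment.yaml ...
#       --ntp-server pool.ntp.org --control-scale 3 --compute-scale 2
#       --control-flavor control --compute-flavor compute --libvirt-type qemu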
251
252 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
253                patches=None):
254     """
255     Locates the SDN image and prepares it for deployment.
256     :param ds: deploy settings
257     :param ns: network settings
258     :param img: sdn image
259     :param tmp_dir: dir to store modified sdn image
260     :param root_pw: password to configure for overcloud image
261     :param docker_tag: Docker image tag for RDO version (default None)
262     :param patches: List of patches to apply to overcloud image
263     :return: None
264     """
265     # TODO(trozet): Come up with a better way to organize this logic in this
266     # function
267     logging.info("Preparing image: {} for deployment".format(img))
268     if not os.path.isfile(img):
269         logging.error("Missing SDN image {}".format(img))
270         raise ApexDeployException("Missing SDN image file: {}".format(img))
271
272     ds_opts = ds['deploy_options']
273     virt_cmds = list()
274     sdn = ds_opts['sdn_controller']
275     patched_containers = set()
276     # The neutron-openvswitch-agent service must be removed from the image
277     # due to rhbz #1436021 (fixed in systemd-219-37.el7)
278     if sdn is not False:
279         logging.info("Disabling Neutron openvswitch-agent service")
280         virt_cmds.extend([{
281             con.VIRT_RUN_CMD:
282                 "rm -f /etc/systemd/system/multi-user.target.wants/"
283                 "neutron-openvswitch-agent.service"},
284             {
285             con.VIRT_RUN_CMD:
286                 "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
287                 ".service"
288         }])
289
290     if ns.get('http_proxy', ''):
291         virt_cmds.append({
292             con.VIRT_RUN_CMD:
293                 "echo 'http_proxy={}' >> /etc/environment".format(
294                     ns['http_proxy'])})
295
296     if ns.get('https_proxy', ''):
297         virt_cmds.append({
298             con.VIRT_RUN_CMD:
299                 "echo 'https_proxy={}' >> /etc/environment".format(
300                     ns['https_proxy'])})
301
302     if ds_opts['vpn']:
303         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
304         virt_cmds.append({
305             con.VIRT_RUN_CMD:
306                 "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
307                 "/opt/quagga/etc/init.d/zrpcd_start.sh"})
308         virt_cmds.append({
309             con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
310                               "zrpcd_start.sh"})
311         virt_cmds.append({
312             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
313                               "init.d/zrpcd_start.sh' /etc/rc.local "})
314         virt_cmds.append({
315             con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
316                               "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
317         logging.info("ZRPCD startup configured via rc.local")
318
319     dataplane = ds_opts['dataplane']
320     if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
321         logging.info("Enabling kernel modules for dpdk")
322         # file to module mapping
323         uio_types = {
324             os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
325             os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
326         }
327         for mod_file, mod in uio_types.items():
328             with open(mod_file, 'w') as fh:
329                 fh.write('#!/bin/bash\n')
330                 fh.write('exec /sbin/modprobe {}'.format(mod))
332
333             virt_cmds.extend([
334                 {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
335                     mod_file)},
336                 {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
337                                    "{}".format(os.path.basename(mod_file))}
338             ])
339     if root_pw:
340         pw_op = "password:{}".format(root_pw)
341         virt_cmds.append({con.VIRT_PW: pw_op})
342     if ds_opts['sfc'] and dataplane == 'ovs':
343         virt_cmds.extend([
344             {con.VIRT_RUN_CMD: "yum -y install "
345                                "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
346                                "{}".format(OVS_NSH_KMOD_RPM)},
347             {con.VIRT_RUN_CMD: "yum downgrade -y "
348                                "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
349                                "{}".format(OVS_NSH_RPM)}
350         ])
351     if dataplane == 'fdio':
352         # Patch neutron to use the OVS external interface for the router
353         # and add the generic Linux namespace interface driver
354         virt_cmds.append(
355             {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
356                                "-p1 < neutron-patch-NSDriver.patch"})
357         if sdn is False:
358             virt_cmds.extend([
359                 {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
360                 {con.VIRT_RUN_CMD: "yum install -y "
361                                    "/root/nosdn_vpp_rpms/*.rpm"}
362             ])
363
364     tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
365     shutil.copyfile(img, tmp_oc_image)
366     logging.debug("Temporary overcloud image stored as: {}".format(
367         tmp_oc_image))
368
369     undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
370         'installer_vm']['ip']
371     if sdn == 'opendaylight':
372         oc_builder.inject_opendaylight(
373             odl_version=ds_opts['odl_version'],
374             image=tmp_oc_image,
375             tmp_dir=tmp_dir,
376             uc_ip=undercloud_admin_ip,
377             os_version=ds_opts['os_version'],
378             docker_tag=docker_tag,
379         )
380         if docker_tag:
381             patched_containers = patched_containers.union({'opendaylight'})
382
383     if patches:
384         if ds_opts['os_version'] == 'master':
385             branch = ds_opts['os_version']
386         else:
387             branch = "stable/{}".format(ds_opts['os_version'])
388         logging.info('Adding patches to overcloud')
389         patched_containers = patched_containers.union(
390             c_builder.add_upstream_patches(patches,
391                                            tmp_oc_image, tmp_dir,
392                                            branch,
393                                            uc_ip=undercloud_admin_ip,
394                                            docker_tag=docker_tag))
395     # If deploying containers with Ceph and no Ceph device is provided, we
396     # need to use a persistent loop device for the Ceph OSDs
397     if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
398         tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
399         with open(tmp_losetup, 'w') as fh:
400             fh.write(LOSETUP_SERVICE)
401         virt_cmds.extend([
402             {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
403              },
404             {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
405             {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
406             {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
407         ])
408     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
409     logging.info("Overcloud image customization complete")
410     return patched_containers
411
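# The virt_cmds assembled above form a list of single-key dicts that is
# handed to virt_utils.virt_customize() to modify the overcloud image, e.g.
# (illustrative entries only; the upload path depends on tmp_dir):
#
#   [{con.VIRT_RUN_CMD: 'chmod +x /etc/rc.d/rc.local'},
#    {con.VIRT_UPLOAD: '/tmp/losetup.service:/usr/lib/systemd/system/'},
#    {con.VIRT_PW: 'password:<root_pw>'}]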
412
413 def make_ssh_key():
414     """
415     Creates a public/private SSH key pair using 1024-bit RSA
416     :return: private key (PEM), public key (OpenSSH), both as strings
417     """
418     key = rsa.generate_private_key(
419         backend=crypto_default_backend(),
420         public_exponent=65537,
421         key_size=1024
422     )
423
424     private_key = key.private_bytes(
425         crypto_serialization.Encoding.PEM,
426         crypto_serialization.PrivateFormat.PKCS8,
427         crypto_serialization.NoEncryption())
428     public_key = key.public_key().public_bytes(
429         crypto_serialization.Encoding.OpenSSH,
430         crypto_serialization.PublicFormat.OpenSSH
431     )
432     return private_key.decode('utf-8'), public_key.decode('utf-8')
433
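# A quick sketch of the expected return formats (PKCS8 PEM private key and
# an OpenSSH-encoded public key):
#
#   priv, pub = make_ssh_key()
#   # priv starts with '-----BEGIN PRIVATE KEY-----'
#   # pub starts with 'ssh-rsa '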
434
435 def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
436     """
437     Creates modified opnfv/network environments for deployment
438     :param ds: deploy settings
439     :param ns: network settings
440     :param inv: node inventory
441     :param opnfv_env: file path for opnfv-environment file
442     :param net_env: file path for network-environment file
443     :param tmp_dir: Apex tmp dir
444     :return:
445     """
446
447     logging.info("Preparing opnfv-environment and network-environment files")
448     ds_opts = ds['deploy_options']
449     tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
450     shutil.copyfile(opnfv_env, tmp_opnfv_env)
451     tenant_nic_map = ns['networks']['tenant']['nic_mapping']
452     tenant_nic = dict()
453     tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
454     tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
455     external_nic_map = ns['networks']['external'][0]['nic_mapping']
456     external_nic = dict()
457     external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]
458
459     # SSH keys
460     private_key, public_key = make_ssh_key()
461
462     # Make easier/faster variables to index in the file editor
463     if 'performance' in ds_opts:
464         perf = True
465         # vpp
466         if 'vpp' in ds_opts['performance']['Compute']:
467             perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
468         else:
469             perf_vpp_comp = None
470         if 'vpp' in ds_opts['performance']['Controller']:
471             perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
472         else:
473             perf_vpp_ctrl = None
474
475         # ovs
476         if 'ovs' in ds_opts['performance']['Compute']:
477             perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
478         else:
479             perf_ovs_comp = None
480
481         # kernel
482         if 'kernel' in ds_opts['performance']['Compute']:
483             perf_kern_comp = ds_opts['performance']['Compute']['kernel']
484         else:
485             perf_kern_comp = None
486     else:
487         perf = False
488
489     # Modify OPNFV environment
490     # TODO: Change to building a dict and outputting YAML rather than parsing
491     for line in fileinput.input(tmp_opnfv_env, inplace=True):
492         line = line.strip('\n')
493         output_line = line
494         if 'CloudDomain' in line:
495             output_line = "  CloudDomain: {}".format(ns['domain_name'])
496         elif 'replace_private_key' in line:
497             output_line = "    private_key: |\n"
498             key_out = ''
499             for line in private_key.splitlines():
500                 key_out += "      {}\n".format(line)
501             output_line += key_out
502         elif 'replace_public_key' in line:
503             output_line = "    public_key: '{}'".format(public_key)
504         elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
505                 'resource_registry' in line:
506             output_line = "resource_registry:\n" \
507                           "  OS::TripleO::NodeUserData: first-boot.yaml"
508         elif 'ComputeExtraConfigPre' in line and \
509                 ds_opts['dataplane'] == 'ovs_dpdk':
510             output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
511                           './ovs-dpdk-preconfig.yaml'
512
513         if ds_opts['sdn_controller'] == 'opendaylight' and \
514                 'odl_vpp_routing_node' in ds_opts:
515             if 'opendaylight::vpp_routing_node' in line:
516                 output_line = ("    opendaylight::vpp_routing_node: {}.{}"
517                                .format(ds_opts['odl_vpp_routing_node'],
518                                        ns['domain_name']))
519         elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
520             if 'NeutronVPPAgentPhysnets' in line:
521                 # VPP interface tap0 will be used for external network
522                 # connectivity.
523                 output_line = ("  NeutronVPPAgentPhysnets: "
524                                "'datacentre:{},external:tap0'"
525                                .format(tenant_nic['Controller']))
526         elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
527                 'dvr') is True:
528             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
529                 output_line = ''
530             elif 'NeutronDhcpAgentsPerNetwork' in line:
531                 num_control, num_compute = inv.get_node_counts()
532                 output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
533                                .format(num_compute))
534             elif 'ComputeServices' in line:
535                 output_line = ("  ComputeServices:\n"
536                                "    - OS::TripleO::Services::NeutronDhcpAgent")
537         # SRIOV networks are VLAN-based provider networks. In order to simplify
538         # the deployment, nfv_sriov will be the default physnet. VLANs are not
539         # needed in advance, and the user will have to create the network
540         # specifying the segmentation-id.
541         if ds_opts['sriov']:
542             if 'NeutronNetworkVLANRanges' in line:
543                 output_line = ("{},nfv_sriov'".format(line[:-1]))
544
545         if perf:
546             for role in 'NovaCompute', 'Controller':
547                 if role == 'NovaCompute':
548                     perf_opts = perf_vpp_comp
549                 else:
550                     perf_opts = perf_vpp_ctrl
551                 cfg = "{}ExtraConfig".format(role)
552                 if cfg in line and perf_opts:
553                     perf_line = ''
554                     if 'main-core' in perf_opts:
555                         perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
556                                       .format(perf_opts['main-core']))
557                     if 'corelist-workers' in perf_opts:
558                         perf_line += ("\n    "
559                                       "fdio::vpp_cpu_corelist_workers: '{}'"
560                                       .format(perf_opts['corelist-workers']))
561                     if ds_opts['sdn_controller'] == 'opendaylight' and \
562                             ds_opts['dataplane'] == 'fdio':
563                         if role == 'NovaCompute':
564                             perf_line += ("\n    "
565                                           "tripleo::profile::base::neutron::"
566                                           "agents::honeycomb::"
567                                           "interface_role_mapping:"
568                                           " ['{}:tenant-interface',"
569                                           "'{}:public-interface']"
570                                           .format(tenant_nic[role],
571                                                   external_nic[role]))
572                         else:
573                             perf_line += ("\n    "
574                                           "tripleo::profile::base::neutron::"
575                                           "agents::honeycomb::"
576                                           "interface_role_mapping:"
577                                           " ['{}:tenant-interface']"
578                                           .format(tenant_nic[role]))
579                     if perf_line:
580                         output_line = ("  {}:{}".format(cfg, perf_line))
581
582             if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
583                 for k, v in OVS_PERF_MAP.items():
584                     if k in line and v in perf_ovs_comp:
585                         output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])
586
587             # kernel args
588             # (FIXME) use compute's kernel settings for all nodes for now.
589             if perf_kern_comp:
590                 if 'NovaSchedulerDefaultFilters' in line:
591                     output_line = \
592                         "  NovaSchedulerDefaultFilters: 'RamFilter," \
593                         "ComputeFilter,AvailabilityZoneFilter," \
594                         "ComputeCapabilitiesFilter," \
595                         "ImagePropertiesFilter,NUMATopologyFilter'"
596                 elif 'ComputeKernelArgs' in line:
597                     kernel_args = ''
598                     for k, v in perf_kern_comp.items():
599                         kernel_args += "{}={} ".format(k, v)
600                     if kernel_args:
601                         output_line = "  ComputeKernelArgs: '{}'".\
602                             format(kernel_args)
603
604         print(output_line)
605
606     logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
607
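# A minimal sketch of the in-place editing performed above (domain name is
# hypothetical): a template line containing 'CloudDomain', e.g.
#
#     CloudDomain: localdomain
#
# is rewritten as
#
#     CloudDomain: opnfv.example.com
#
# using ns['domain_name'].  The remaining branches follow the same pattern:
# match a marker in the current line and print a replacement line.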
608
609 def generate_ceph_key():
610     key = os.urandom(16)
611     header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
612     return base64.b64encode(header + key)
613
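# The generated secret follows the layout packed above: a 12-byte header
# ('<hiih': type, creation time, 0, key length) followed by 16 random key
# bytes, all base64 encoded.  Illustrative check (timestamp hypothetical):
#
#   raw = base64.b64decode(generate_ceph_key())
#   struct.unpack('<hiih', raw[:12])   # -> (1, <creation time>, 0, 16)
#   len(raw) - 12                      # -> 16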
614
615 def prep_storage_env(ds, ns, virtual, tmp_dir):
616     """
617     Creates the storage environment file for deployment.  The source file is
618     copied to the host by the undercloud playbook.
619     :param ds:
620     :param ns:
621     :param virtual:
622     :param tmp_dir:
623     :return:
624     """
625     ds_opts = ds['deploy_options']
626     storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
627     if not os.path.isfile(storage_file):
628         logging.error("storage-environment file is not in tmp directory: {}. "
629                       "Check if file was copied from "
630                       "undercloud".format(tmp_dir))
631         raise ApexDeployException("storage-environment file not copied from "
632                                   "undercloud")
633     for line in fileinput.input(storage_file, inplace=True):
634         line = line.strip('\n')
635         if 'CephClusterFSID' in line:
636             print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
637         elif 'CephMonKey' in line:
638             print("  CephMonKey: {}".format(generate_ceph_key().decode(
639                 'utf-8')))
640         elif 'CephAdminKey' in line:
641             print("  CephAdminKey: {}".format(generate_ceph_key().decode(
642                 'utf-8')))
643         elif 'CephClientKey' in line:
644             print("  CephClientKey: {}".format(generate_ceph_key().decode(
645                 'utf-8')))
646         else:
647             print(line)
648
649     if ds_opts['containers']:
650         undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
651             'installer_vm']['ip']
652         ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
653         docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
654                        "{}-centos-7".format(undercloud_admin_ip,
655                                             ceph_version)
656         ceph_params = {
657             'DockerCephDaemonImage': docker_image,
658         }
659
660         # The maximum number of PGs allowed is calculated as num_mons * 200,
661         # so set the number of PGs and pools such that the product stays
662         # below that limit: num_pgs * num_pools * num_osds
663         ceph_params['CephPoolDefaultSize'] = 2
664         ceph_params['CephPoolDefaultPgNum'] = 32
665         if virtual:
666             ceph_params['CephAnsibleExtraConfig'] = {
667                 'centos_package_dependencies': [],
668                 'ceph_osd_docker_memory_limit': '1g',
669                 'ceph_mds_docker_memory_limit': '1g',
670             }
671         ceph_device = ds_opts['ceph_device']
672         ceph_params['CephAnsibleDisksConfig'] = {
673             'devices': [ceph_device],
674             'journal_size': 512,
675             'osd_scenario': 'collocated'
676         }
677         utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
678     # TODO(trozet): remove following block as we only support containers now
679     elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
680         with open(storage_file, 'a') as fh:
681             fh.write('  ExtraConfig:\n')
682             fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
683                 ds_opts['ceph_device']
684             ))
685
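# A minimal sketch of the resulting parameter_defaults for a virtual,
# containerized deploy (assuming utils.edit_tht_env merges ceph_params into
# that section; IP and ceph version are hypothetical):
#
#   parameter_defaults:
#     DockerCephDaemonImage: 192.0.2.1:8787/ceph/daemon:tag-build-master-luminous-centos-7
#     CephPoolDefaultSize: 2
#     CephPoolDefaultPgNum: 32
#     CephAnsibleExtraConfig: {centos_package_dependencies: [], ...}
#     CephAnsibleDisksConfig:
#       devices: ['/dev/loop3']
#       journal_size: 512
#       osd_scenario: collocated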
686
687 def prep_sriov_env(ds, tmp_dir):
688     """
689     Creates the SR-IOV environment file for deployment. The source file is
690     copied to the host by the undercloud playbook.
691     :param ds:
692     :param tmp_dir:
693     :return:
694     """
695     ds_opts = ds['deploy_options']
696     sriov_iface = ds_opts['sriov']
697     sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
698     if not os.path.isfile(sriov_file):
699         logging.error("sriov-environment file is not in tmp directory: {}. "
700                       "Check if file was copied from "
701                       "undercloud".format(tmp_dir))
702         raise ApexDeployException("sriov-environment file not copied from "
703                                   "undercloud")
704     # TODO(rnoriega): Instead of line editing, refactor this code to load
705     # yaml file into a dict, edit it and write the file back.
706     for line in fileinput.input(sriov_file, inplace=True):
707         line = line.strip('\n')
708         if 'NovaSchedulerDefaultFilters' in line:
709             print("  {}".format(line[3:]))
710         elif 'NovaSchedulerAvailableFilters' in line:
711             print("  {}".format(line[3:]))
712         elif 'NeutronPhysicalDevMappings' in line:
713             print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
714                   .format(sriov_iface))
715         elif 'NeutronSriovNumVFs' in line:
716             print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
717         elif 'NovaPCIPassthrough' in line:
718             print("  NovaPCIPassthrough:")
719         elif 'devname' in line:
720             print("    - devname: \"{}\"".format(sriov_iface))
721         elif 'physical_network' in line:
722             print("      physical_network: \"nfv_sriov\"")
723         else:
724             print(line)
725
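# A minimal sketch of the rewritten lines for a hypothetical SR-IOV
# interface 'ens1f0':
#
#   NeutronPhysicalDevMappings: "nfv_sriov:ens1f0"
#   NeutronSriovNumVFs: "ens1f0:8"
#   NovaPCIPassthrough:
#     - devname: "ens1f0"
#       physical_network: "nfv_sriov"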
726
727 def external_network_cmds(ns, ds):
728     """
729     Generates external network openstack commands
730     :param ns: network settings
731     :param ds: deploy settings
732     :return: list of commands to configure external network
733     """
734     ds_opts = ds['deploy_options']
735     external_physnet = 'datacentre'
736     if ds_opts['dataplane'] == 'fdio' and \
737        ds_opts['sdn_controller'] != 'opendaylight':
738         external_physnet = 'external'
739     if 'external' in ns.enabled_network_list:
740         net_config = ns['networks']['external'][0]
741         external = True
742         pool_start, pool_end = net_config['floating_ip_range']
743     else:
744         net_config = ns['networks']['admin']
745         external = False
746         pool_start, pool_end = ns['apex']['networks']['admin'][
747             'introspection_range']
748     nic_config = net_config['nic_mapping']
749     gateway = net_config['gateway']
750     cmds = list()
751     # create network command
752     if nic_config['compute']['vlan'] == 'native':
753         ext_type = 'flat'
754     else:
755         ext_type = "vlan --provider-segment {}".format(nic_config[
756                                                        'compute']['vlan'])
757     cmds.append("openstack network create external --project service "
758                 "--external --provider-network-type {} "
759                 "--provider-physical-network {}"
760                 .format(ext_type, external_physnet))
761     # create subnet command
762     cidr = net_config['cidr']
763     subnet_cmd = "openstack subnet create external-subnet --project " \
764                  "service --network external --no-dhcp --gateway {} " \
765                  "--allocation-pool start={},end={} --subnet-range " \
766                  "{}".format(gateway, pool_start, pool_end, str(cidr))
767     if external and cidr.version == 6:
768         subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
769                       '--ipv6-address-mode slaac'
770     cmds.append(subnet_cmd)
771     logging.debug("Neutron external network commands determined "
772                   "as: {}".format(cmds))
773     return cmds
774
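# Illustrative output for a native-VLAN external network (addresses are
# hypothetical):
#
#   ['openstack network create external --project service --external '
#    '--provider-network-type flat --provider-physical-network datacentre',
#    'openstack subnet create external-subnet --project service --network '
#    'external --no-dhcp --gateway 172.30.9.1 --allocation-pool '
#    'start=172.30.9.50,end=172.30.9.100 --subnet-range 172.30.9.0/24']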
775
776 def create_congress_cmds(overcloud_file):
777     drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
778     overcloudrc = parsers.parse_overcloudrc(overcloud_file)
779     logging.info("Creating congress commands")
780     try:
781         ds_cfg = [
782             "username={}".format(overcloudrc['OS_USERNAME']),
783             "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
784             "password={}".format(overcloudrc['OS_PASSWORD']),
785             "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
786         ]
787     except KeyError:
788         logging.error("Unable to find all keys required for congress in "
789                       "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
790                       "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
791                       "file: {}".format(overcloud_file))
792         raise
793     cmds = list()
794     ds_cfg = '--config ' + ' --config '.join(ds_cfg)
795
796     for driver in drivers:
797         if driver == 'doctor':
798             cmd = "{} \"{}\"".format(driver, driver)
799         else:
800             cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
801         if driver == 'nova':
802             cmd += ' --config api_version="2.34"'
803         logging.debug("Congress command created: {}".format(cmd))
804         cmds.append(cmd)
805     return cmds
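
# Illustrative entry generated for the 'nova' driver (credentials are
# hypothetical; each entry is a single string, wrapped here for readability):
#
#   nova "nova" --config username=admin --config tenant_name=admin
#       --config password=secret --config auth_url=http://192.0.2.1:5000/v3
#       --config api_version="2.34"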