Merge "Updates configs for ODL CSIT deployments"
[apex.git] / apex / overcloud / deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import platform
import shutil
import uuid
import struct
import time
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'l2gw': 'neutron-l2gw-opendaylight.yaml',
        'sriov': 'neutron-opendaylight-sriov.yaml',
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}
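
# OVS_PERF_MAP maps TripleO heat parameter names (keys) to the option names
# expected under the 'performance' > 'Compute' > 'ovs' section of the deploy
# settings (values).  prep_env() uses it to substitute user provided values
# into opnfv-environment.yaml for OVS DPDK deployments.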

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"

LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target

[Service]
Type=oneshot
ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
ExecStop=/sbin/losetup -d /dev/loop3
TimeoutSec=60
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
"""


def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN environment files to be used in the deploy cmd.

    This function recursively searches an sdn_map.  First the SDN controller
    is matched, and then the function looks for enabled features of that
    controller to determine which environment files should be used.  By
    default a feature is added to the list when it is set to True in the
    deploy settings.  If a feature is mapped to a tuple rather than a single
    file, the tuple is interpreted as (expected value, env file) and the env
    file is only added when the deploy settings value for that feature
    matches the expected value.

    :param ds: deploy settings
    :param sdn_map: SDN map to recursively search
    :param env_list: recursive var to hold previously found env_list
    :return: A list of env files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k]):
            if isinstance(v, dict):
                # Append default SDN env file first
                # The assumption is that feature-enabled SDN env files
                # override and do not conflict with previously set default
                # settings
                if ds['sdn_controller'] == k and 'default' in v:
                    env_list.append(os.path.join(con.THT_ENV_DIR,
                                                 v['default']))
                env_list.extend(build_sdn_env_list(ds, v))
            # check if the value is not a boolean
            elif isinstance(v, tuple):
                if ds[k] == v[0]:
                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
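
# Example (illustrative only): with deploy settings such as
#   {'sdn_controller': 'opendaylight', 'sfc': True, 'vpn': False, ...}
# build_sdn_env_list(ds_opts, SDN_FILE_MAP) returns something like
#   [<THT_ENV_DIR>/neutron-opendaylight.yaml,
#    <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]
# i.e. the controller's default env file followed by one file per enabled
# feature.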


def get_docker_sdn_file(ds_opts):
    """
    Returns docker env file for detected SDN
    :param ds_opts: deploy options
    :return: docker THT env file for an SDN
    """
    # FIXME(trozet): We assume right now there is only one docker SDN file
    docker_services = con.VALID_DOCKER_SERVICES
    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
    for sdn_file in sdn_env_list:
        sdn_base = os.path.basename(sdn_file)
        if sdn_base in docker_services:
            if docker_services[sdn_base] is not None:
                return os.path.join(tht_dir,
                                    docker_services[sdn_base])
            else:
                return os.path.join(tht_dir, sdn_base)
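
# Note: get_docker_sdn_file() reuses build_sdn_env_list() and returns the
# containerized counterpart of the first matching SDN env file from the
# docker THT environment directory for the target OpenStack version;
# con.VALID_DOCKER_SERVICES may remap the file name, otherwise the same
# basename is reused.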


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml',
                      net_data=False):

    logging.info("Creating deployment command")
    deploy_options = ['network-environment.yaml']

    ds_opts = ds['deploy_options']

    if ds_opts['containers']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'docker.yaml'))

    if ds['global_params']['ha_enabled']:
        if ds_opts['containers']:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'docker-ha.yaml'))
        else:
            deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                               'puppet-pacemaker.yaml'))

    if env_file:
        deploy_options.append(env_file)

    if ds_opts['containers']:
        deploy_options.append('docker-images.yaml')
        sdn_docker_file = get_docker_sdn_file(ds_opts)
        if sdn_docker_file:
            deploy_options.append(sdn_docker_file)
            deploy_options.append('sdn-images.yaml')
    else:
        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            if ds_opts['containers']:
                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
                                                   "{}.yaml".format(k)))
            else:
                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph'] and 'csit' not in env_file:
        prep_storage_env(ds, ns, virtual, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds_opts['sriov']:
        prep_sriov_env(ds, tmp_dir)

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    num_control, num_compute = inv.get_node_counts()
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control, num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    if platform.machine() == 'aarch64':
        # aarch64 deploys were not completing in the default 90 mins.
        # Not sure if this is related to the hardware the OOO support
        # was developed on or the virtualization support in CentOS.
        # Either way it will probably get better over time as the aarch64
        # support matures in CentOS, and deploy time should be tested in
        # the future so this multiplier can be removed.
        con.DEPLOY_TIMEOUT *= 2
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          .format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    if net_data:
        cmd += ' --networks-file network_data.yaml'
    libvirt_type = 'kvm'
    if virtual:
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            nested_kvm = f.read().strip()
            if nested_kvm != 'Y':
                libvirt_type = 'qemu'
    cmd += ' --libvirt-type {}'.format(libvirt_type)
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
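
# Example (illustrative only, shown wrapped): for a virtual, non-HA,
# non-containerized OpenDaylight deployment the generated command resembles:
#   openstack overcloud deploy --templates --timeout 90
#     -e network-environment.yaml -e opnfv-environment.yaml
#     -e <THT_ENV_DIR>/neutron-opendaylight.yaml -e virtual-environment.yaml
#     --ntp-server <ntp> --control-scale 1 --compute-scale 1
#     --control-flavor control --compute-flavor compute --libvirt-type kvm
# The exact list of -e files depends on the deploy settings evaluated above.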


def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
               patches=None, upstream=False):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param ns: network settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :param docker_tag: Docker image tag for RDO version (default None)
    :param patches: List of patches to apply to overcloud image
    :param upstream: (boolean) Indicates if upstream deployment or not
    :return: set of container names that were patched
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    patched_containers = set()
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ns.get('http_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'http_proxy={}' >> /etc/environment".format(
                    ns['http_proxy'])})

    if ns.get('https_proxy', ''):
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'https_proxy={}' >> /etc/environment".format(
                    ns['https_proxy'])})

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
        virt_cmds.append({
            con.VIRT_RUN_CMD:
                "echo 'sudo /opt/quagga/etc/init.d/zrpcd start' > "
                "/opt/quagga/etc/init.d/zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "chmod +x /opt/quagga/etc/init.d/"
                              "zrpcd_start.sh"})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.local "})
        virt_cmds.append({
            con.VIRT_RUN_CMD: "sed -i '$a /opt/quagga/etc/"
                              "init.d/zrpcd_start.sh' /etc/rc.d/rc.local"})
        logging.info("ZRPCD configured to start at boot")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use the OVS external interface for the router
        # and add the generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})
        if sdn is False:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
                {con.VIRT_RUN_CMD: "yum install -y "
                                   "/root/nosdn_vpp_rpms/*.rpm"}
            ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))

    # Undercloud admin IP is needed both for OpenDaylight injection and for
    # applying upstream patches, so resolve it up front
    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
        'installer_vm']['ip']

    # TODO (trozet): remove this if block after Fraser
    if sdn == 'opendaylight' and not upstream:
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
            if ds_opts['odl_version'] == 'master':
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])
            else:
                virt_cmds.extend([
                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                        ds_opts['odl_version'])}
                ])

        elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
                and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])
    elif sdn == 'opendaylight':
        oc_builder.inject_opendaylight(
            odl_version=ds_opts['odl_version'],
            image=tmp_oc_image,
            tmp_dir=tmp_dir,
            uc_ip=undercloud_admin_ip,
            os_version=ds_opts['os_version'],
            docker_tag=docker_tag,
        )
        if docker_tag:
            patched_containers = patched_containers.union({'opendaylight'})

    if patches:
        if ds_opts['os_version'] == 'master':
            branch = ds_opts['os_version']
        else:
            branch = "stable/{}".format(ds_opts['os_version'])
        logging.info('Adding patches to overcloud')
        patched_containers = patched_containers.union(
            c_builder.add_upstream_patches(patches,
                                           tmp_oc_image, tmp_dir,
                                           branch,
                                           uc_ip=undercloud_admin_ip,
                                           docker_tag=docker_tag))
    # if containers with ceph, and no ceph device we need to use a
    # persistent loop device for Ceph OSDs
    if docker_tag and not ds_opts.get('ceph_device', None):
        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
        with open(tmp_losetup, 'w') as fh:
            fh.write(LOSETUP_SERVICE)
        virt_cmds.extend([
            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
             },
            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
    return patched_containers


def make_ssh_key():
    """
    Creates a public/private SSH key pair using 1024-bit RSA
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key.decode('utf-8'), public_key.decode('utf-8')
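
# Example (illustrative only):
#   priv, pub = make_ssh_key()
# 'priv' is a PEM encoded PKCS8 private key and 'pub' is a single
# 'ssh-rsa AAAA...' line; prep_env() substitutes them into the
# replace_private_key and replace_public_key placeholders of
# opnfv-environment.yaml.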


def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param inv: node inventory
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_nic = dict()
    tenant_nic['Controller'] = tenant_nic_map['controller']['members'][0]
    tenant_nic['NovaCompute'] = tenant_nic_map['compute']['members'][0]
    external_nic_map = ns['networks']['external'][0]['nic_mapping']
    external_nic = dict()
    external_nic['NovaCompute'] = external_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change this to build a dict and output YAML rather than editing
    # the file line by line
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "    private_key: |\n"
            key_out = ''
            for key_line in private_key.splitlines():
                key_out += "      {}\n".format(key_line)
            output_line += key_out
        elif 'replace_public_key' in line:
            output_line = "    public_key: '{}'".format(public_key)
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            output_line = "resource_registry:\n" \
                          "  OS::TripleO::NodeUserData: first-boot.yaml"
        elif 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                          './ovs-dpdk-preconfig.yaml'

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts:
            if 'opendaylight::vpp_routing_node' in line:
                output_line = ("    opendaylight::vpp_routing_node: {}.{}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                # VPP interface tap0 will be used for external network
                # connectivity.
                output_line = ("  NeutronVPPAgentPhysnets: "
                               "'datacentre:{},external:tap0'"
                               .format(tenant_nic['Controller']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                'dvr') is True:
            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                output_line = ''
            elif 'NeutronDhcpAgentsPerNetwork' in line:
                num_control, num_compute = inv.get_node_counts()
                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
                               .format(num_compute))
            elif 'ComputeServices' in line:
                output_line = ("  ComputeServices:\n"
                               "    - OS::TripleO::Services::NeutronDhcpAgent")
        # SRIOV networks are VLAN based provider networks. In order to simplify
        # the deployment, nfv_sriov will be the default physnet. VLANs are not
        # needed in advance, and the user will have to create the network
        # specifying the segmentation-id.
        if ds_opts['sriov']:
            if 'NeutronNetworkVLANRanges' in line:
                output_line = ("{},nfv_sriov'".format(line[:-1]))

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if ds_opts['sdn_controller'] == 'opendaylight' and \
                            ds_opts['dataplane'] == 'fdio':
                        if role == 'NovaCompute':
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface',"
                                          "'{}:public-interface']"
                                          .format(tenant_nic[role],
                                                  external_nic[role]))
                        else:
                            perf_line += ("\n    "
                                          "tripleo::profile::base::neutron::"
                                          "agents::honeycomb::"
                                          "interface_role_mapping:"
                                          " ['{}:tenant-interface']"
                                          .format(tenant_nic[role]))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if perf_kern_comp:
                if 'NovaSchedulerDefaultFilters' in line:
                    output_line = \
                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
                        "ComputeFilter,AvailabilityZoneFilter," \
                        "ComputeCapabilitiesFilter," \
                        "ImagePropertiesFilter,NUMATopologyFilter'"
                elif 'ComputeKernelArgs' in line:
                    kernel_args = ''
                    for k, v in perf_kern_comp.items():
                        kernel_args += "{}={} ".format(k, v)
                    if kernel_args:
                        output_line = "  ComputeKernelArgs: '{}'".\
                            format(kernel_args)

        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))


def generate_ceph_key():
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)
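
# Note: the generated secret mirrors the binary layout of a cephx key: a
# little-endian header of (type=1, creation time in seconds, nanoseconds=0,
# key length) followed by 16 random bytes, all base64 encoded.  The struct
# format '<hiih' packs those header fields as int16, int32, int32, int16.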


def prep_storage_env(ds, ns, virtual, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param ns: network settings
    :param virtual: (boolean) whether deployment is virtual
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephClientKey' in line:
            print("  CephClientKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)

    if ds_opts['containers']:
        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
            'installer_vm']['ip']
        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
                       "{}-centos-7".format(undercloud_admin_ip,
                                            ceph_version)
        ceph_params = {
            'DockerCephDaemonImage': docker_image,
        }
        if not ds['global_params']['ha_enabled']:
            ceph_params['CephPoolDefaultSize'] = 1

        if virtual:
            ceph_params['CephAnsibleExtraConfig'] = {
                'centos_package_dependencies': [],
                'ceph_osd_docker_memory_limit': '1g',
                'ceph_mds_docker_memory_limit': '1g',
            }
            ceph_params['CephPoolDefaultPgNum'] = 32
        if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
            ceph_device = ds_opts['ceph_device']
        else:
            # TODO(trozet): make this DS default after Fraser
            ceph_device = '/dev/loop3'

        ceph_params['CephAnsibleDisksConfig'] = {
            'devices': [ceph_device],
            'journal_size': 512,
            'osd_scenario': 'collocated'
        }
        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def prep_sriov_env(ds, tmp_dir):
    """
    Creates SRIOV environment file for deployment. Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    sriov_iface = ds_opts['sriov']
    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
    if not os.path.isfile(sriov_file):
        logging.error("sriov-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("sriov-environment file not copied from "
                                  "undercloud")
    # TODO(rnoriega): Instead of line editing, refactor this code to load
    # yaml file into a dict, edit it and write the file back.
    for line in fileinput.input(sriov_file, inplace=True):
        line = line.strip('\n')
        if 'NovaSchedulerDefaultFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NovaSchedulerAvailableFilters' in line:
            print("  {}".format(line[3:]))
        elif 'NeutronPhysicalDevMappings' in line:
            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
                  .format(sriov_iface))
        elif 'NeutronSriovNumVFs' in line:
            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
        elif 'NovaPCIPassthrough' in line:
            print("  NovaPCIPassthrough:")
        elif 'devname' in line:
            print("    - devname: \"{}\"".format(sriov_iface))
        elif 'physical_network' in line:
            print("      physical_network: \"nfv_sriov\"")
        else:
            print(line)
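
# Example (illustrative only): with an SRIOV interface of 'ens2f0' in the
# deploy settings, the rewritten environment file carries entries such as:
#   NeutronPhysicalDevMappings: "nfv_sriov:ens2f0"
#   NeutronSriovNumVFs: "ens2f0:8"
#   NovaPCIPassthrough:
#     - devname: "ens2f0"
#       physical_network: "nfv_sriov"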


def external_network_cmds(ns, ds):
    """
    Generates external network openstack commands
    :param ns: network settings
    :param ds: deploy settings
    :return: list of commands to configure external network
    """
    ds_opts = ds['deploy_options']
    external_physnet = 'datacentre'
    if ds_opts['dataplane'] == 'fdio' and \
       ds_opts['sdn_controller'] != 'opendaylight':
        external_physnet = 'external'
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network {}"
                .format(ext_type, external_physnet))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
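
# Example (illustrative only): for a native VLAN external network of
# 192.168.37.0/24 the returned commands resemble:
#   openstack network create external --project service --external
#     --provider-network-type flat --provider-physical-network datacentre
#   openstack subnet create external-subnet --project service
#     --network external --no-dhcp --gateway 192.168.37.1
#     --allocation-pool start=192.168.37.200,end=192.168.37.220
#     --subnet-range 192.168.37.0/24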


def create_congress_cmds(overcloud_file):
    """
    Creates congress datasource commands using credentials parsed from the
    overcloudrc file
    :param overcloud_file: path to overcloudrc file
    :return: list of congress command strings
    """
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds
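
# Example (illustrative only): each returned entry is an argument string for
# a congress datasource, e.g. for the nova driver:
#   nova "nova" --config username=admin --config tenant_name=admin
#     --config password=<password> --config auth_url=<keystone_url>
#     --config api_version="2.34"
# The doctor driver is added without any --config options.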