Merge "Migrates Apex to Python"
[apex.git] / apex / overcloud / overcloud_deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import re
15 import shutil
16 import uuid
17 import struct
18 import time
19
20 from apex.common import constants as con
21 from apex.common.exceptions import ApexDeployException
22 from apex.common import parsers
23 from apex.virtual import virtual_utils as virt_utils
24 from cryptography.hazmat.primitives import serialization as \
25     crypto_serialization
26 from cryptography.hazmat.primitives.asymmetric import rsa
27 from cryptography.hazmat.backends import default_backend as \
28     crypto_default_backend
29
30
# Maps an SDN controller name (or False for no controller) plus optional
# feature keys to the TripleO heat environment file enabling that combination.
# Values may be: a plain file name, a nested dict of feature -> file (with a
# 'default' fallback), or a (required option value, file) tuple.
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'opendaylight_sfc.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

# Non-SDN deploy options that enable an extra heat environment file.
OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

# Maps heat parameter names to keys in the 'ovs' performance options.
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

# RPM names/paths baked into the overcloud image by the build process.
OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"
71
72
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds a list of SDN heat environment files for an overcloud deploy.

    Recurses into nested feature maps (e.g. 'vpp') and falls back to the
    controller's 'default' entry when no specific feature matched.
    :param ds: deploy options dict (contains 'sdn_controller' and feature
        flags)
    :param sdn_map: mapping of SDN/feature keys to environment file names
    :param env_list: accumulator used during recursion
    :return: list of paths to TripleO environment files
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # nested feature sub-map: recurse with the same settings
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
            # tuple form: (required option value, env file);
            # use .get() so a key absent from ds does not raise KeyError
            if ds.get(k) == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map[ds['sdn_controller']]['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
93
94
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml'):
    """
    Creates the overcloud deploy command and writes it to tmp_dir.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: inventory of overcloud nodes
    :param tmp_dir: directory to write the 'deploy_command' file into
    :param virtual: True for a virtual deployment, False for baremetal
    :param env_file: opnfv environment file name
    :return: the deploy command string
    :raises ApexDeployException: when a node lacks a recognized capabilities
        tag, or when there are zero control or compute nodes
    """
    logging.info("Creating deployment command")
    deploy_options = [env_file, 'network-environment.yaml']
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    # TODO(trozet): make sure rt kvm file is in tht dir
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    # count nodes per role based on their capabilities tag
    nodes = inv['nodes']
    num_control = 0
    num_compute = 0
    for node in nodes:
        if node['capabilities'] == 'profile:control':
            num_control += 1
        elif node['capabilities'] == 'profile:compute':
            num_compute += 1
        else:
            # TODO(trozet) do we want to allow capabilities to not exist?
            logging.error("Every node must include a 'capabilities' key "
                          "tagged with either 'profile:control' or "
                          "'profile:compute'")
            raise ApexDeployException("Node missing capabilities "
                                      "key: {}".format(node))
    if num_control == 0 or num_compute == 0:
        # fix: message previously read "compute nodes{}" (missing ': ')
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
154
155
def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    :raises ApexDeployException: if the image file does not exist
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"})
        logging.info("ZRPC and Quagga enabled")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        # fix: iterate .items() -- iterating the dict directly yields only
        # file-name keys and the 2-tuple unpack raised ValueError
        for mod_file, mod in uio_types.items():
            # 'with' closes the file; the old explicit fh.close() was
            # redundant
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum upgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    con.DEFAULT_ODL_VERSION)},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
        # NOTE(review): this elif re-tests sdn == 'opendaylight' (always true
        # here), so the vpp netvirt branch only runs when odl_version is the
        # default -- confirm that is the intended behavior
        elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
                and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
265
266
def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: private, public key as UTF-8 strings (public key has the
        leading 'ssh-rsa ' prefix stripped)
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    # fix: raw string -- '\s' in a normal string is an invalid escape
    # sequence (DeprecationWarning, future SyntaxError)
    pub_key = re.sub(r'ssh-rsa\s*', '', public_key.decode('utf-8'))
    return private_key.decode('utf-8'), pub_key
288
289
def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_ctrl_nic = tenant_nic_map['controller']['members'][0]
    tenant_comp_nic = tenant_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        if 'CloudDomain' in line:
            print("  CloudDomain: {}".format(ns['domain_name']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts and ds_opts[
                'odl_vpp_routing_node'] != 'dvr':
            # NOTE(review): '${}.${}' below emits literal '$' characters;
            # confirm the consuming hiera value really expects them
            if 'opendaylight::vpp_routing_node' in line:
                print("    opendaylight::vpp_routing_node: ${}.${}".format(
                    ds_opts['odl_vpp_routing_node'], ns['domain_name']))
            elif 'ControllerExtraConfig' in line:
                print("  ControllerExtraConfig:\n    "
                      "tripleo::profile::base::neutron::agents::honeycomb"
                      "::interface_role_mapping: ['{}:tenant-"
                      "interface]'".format(tenant_ctrl_nic))
            elif 'NovaComputeExtraConfig' in line:
                print("  NovaComputeExtraConfig:\n    "
                      "tripleo::profile::base::neutron::agents::honeycomb"
                      "::interface_role_mapping: ['{}:tenant-"
                      "interface]'".format(tenant_comp_nic))
            else:
                print(line)

        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                print("  NeutronVPPAgentPhysnets: 'datacentre:{}'".format(
                    tenant_ctrl_nic))
            else:
                print(line)
        elif perf:
            line_printed = False
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    if 'main-core' in perf_opts:
                        print("  {}:\n"
                              "    fdio::vpp_cpu_main_core: '{}'"
                              "".format(cfg, perf_opts['main-core']))
                        line_printed = True
                        break
                    # fix: was testing 'corelist-workers' in perf_vpp_comp
                    # while reading perf_opts, which KeyErrors for the
                    # Controller role when only the compute settings have it
                    elif 'corelist-workers' in perf_opts:
                        print("  {}:\n"
                              "    fdio::vpp_cpu_corelist_workers: '{}'"
                              "".format(cfg, perf_opts['corelist-workers']))
                        line_printed = True
                        break

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if 'ComputeKernelArgs' in line and perf_kern_comp:
                kernel_args = ''
                for k, v in perf_kern_comp.items():
                    kernel_args += "{}={}".format(k, v)
                if kernel_args:
                    print("ComputeKernelArgs: '{}'".format(kernel_args))
                    line_printed = True
            elif ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        print("  {}: {}".format(k, perf_ovs_comp[v]))
                        line_printed = True

            if not line_printed:
                print(line)
        elif 'replace_private_key' in line:
            print("      key: '{}'".format(private_key))
        elif 'replace_public_key' in line:
            print("      key: '{}'".format(public_key))
        else:
            print(line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))

    # Modify Network environment
    for line in fileinput.input(net_env, inplace=True):
        line = line.strip('\n')
        if ds_opts['dataplane'] == 'ovs_dpdk':
            if 'ComputeExtraConfigPre' in line:
                print('  OS::TripleO::ComputeExtraConfigPre: '
                      './ovs-dpdk-preconfig.yaml')
            else:
                print(line)
        elif perf and perf_kern_comp:
            if 'resource_registry' in line:
                print("resource_registry:\n"
                      "  OS::TripleO::NodeUserData: first-boot.yaml")
            elif 'NovaSchedulerDefaultFilters' in line:
                print("  NovaSchedulerDefaultFilters: 'RamFilter,"
                      "ComputeFilter,AvailabilityZoneFilter,"
                      "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
                      "NUMATopologyFilter'")
            else:
                print(line)
        else:
            print(line)

    logging.info("network-environment file written to {}".format(net_env))
441
442
def generate_ceph_key():
    """
    Generates a random Ceph secret key, base64 encoded.
    :return: base64-encoded key as bytes
    """
    secret = os.urandom(16)
    created = int(time.time())
    # binary header: type (1), creation secs, creation nsecs, key length
    hdr = struct.pack('<hiih', 1, created, 0, len(secret))
    return base64.b64encode(hdr + secret)
447
448
def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir holding storage-environment.yaml
    :return: None
    :raises ApexDeployException: when the source file is absent from tmp_dir
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    # rewrite the file in place, injecting fresh cluster id and keys
    for raw_line in fileinput.input(storage_file, inplace=True):
        stripped = raw_line.strip('\n')
        if 'CephClusterFSID' in stripped:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in stripped:
            print("  CephMonKey: {}".format(
                generate_ceph_key().decode('utf-8')))
        elif 'CephAdminKey' in stripped:
            print("  CephAdminKey: {}".format(
                generate_ceph_key().decode('utf-8')))
        else:
            print(stripped)
    # optionally declare the OSD device via ExtraConfig
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                ds_opts['ceph_device']
            ))
483
484
def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    # choose the external network when enabled, otherwise fall back to the
    # admin network and its introspection range
    external = 'external' in ns.enabled_network_list
    if external:
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    # network provider type: flat for native vlan, tagged vlan otherwise
    compute_vlan = nic_config['compute']['vlan']
    if compute_vlan == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(compute_vlan)
    cmds = [
        "openstack network create external --project service "
        "--external --provider-network-type {} "
        "--provider-physical-network datacentre".format(ext_type)
    ]
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
525
526
def create_congress_cmds(overcloud_file):
    """
    Generates congress datasource creation commands from an overcloudrc file.
    :param overcloud_file: path to overcloudrc file
    :return: list of congress datasource create argument strings
    :raises KeyError: when required OS_* values are missing from overcloudrc
    """
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        # doctor driver takes no datasource config arguments
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            # fix: leading space was missing, fusing this flag onto the
            # previous argument (...auth_url=X--config api_version=...)
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds