Merge "Adding python unittests for apex/virtual/*"
[apex.git] / apex / overcloud / overcloud_deploy.py
1 ##############################################################################
2 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 import base64
11 import fileinput
12 import logging
13 import os
14 import re
15 import shutil
16 import uuid
17 import struct
18 import time
19
20 from apex.common import constants as con
21 from apex.common.exceptions import ApexDeployException
22 from apex.common import parsers
23 from apex.virtual import utils as virt_utils
24 from cryptography.hazmat.primitives import serialization as \
25     crypto_serialization
26 from cryptography.hazmat.primitives.asymmetric import rsa
27 from cryptography.hazmat.backends import default_backend as \
28     crypto_default_backend
29
30
# Maps SDN controller selection (and nested feature flags from deploy
# options) to the TripleO Heat Template environment file(s) enabling it.
# Values may be:
#   - a file name (str)
#   - a nested dict of feature -> file, with a 'default' fallback
#   - a tuple of (required option value, file name)
# The False key covers deployments with no SDN controller.
SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'neutron-sfc-opendaylight.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

# Optional services toggled by deploy options -> THT env file enabling them.
OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

# Heat parameter name -> key in the performance deploy settings
# (OVS/DPDK tuning values copied into the environment file).
OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

# RPMs expected inside the overcloud image that may be (re)installed
# during image preparation.  Note ODL_NETVIRT_VPP_RPM is a full path.
OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"
71
72
def build_sdn_env_list(ds, sdn_map, env_list=None):
    """
    Builds the list of SDN-related THT environment files for a deployment.
    :param ds: deploy settings ('deploy_options' section)
    :param sdn_map: mapping of option keys to env files (see SDN_FILE_MAP);
        values may be a file name, a nested dict, or a
        (required option value, file name) tuple
    :param env_list: accumulator used during recursion; a fresh list is
        created when not supplied
    :return: list of environment file paths
    """
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                # nested map: recurse to resolve sub-options
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
            # tuple is (required option value, env file); use get() so a
            # missing option key does not raise KeyError
            if ds.get(k) == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list
93
94
def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml'):
    """
    Builds the 'openstack overcloud deploy' command for this deployment.
    The command is also written to <tmp_dir>/deploy_command.
    :param ds: deploy settings
    :param ns: network settings
    :param inv: inventory; every node's 'capabilities' must be tagged with
        'profile:control' or 'profile:compute'
    :param tmp_dir: Apex tmp dir
    :param virtual: True for a virtual deployment
    :param env_file: base OPNFV environment file to include
    :return: the deploy command string
    :raises ApexDeployException: on a node missing capability tags, or an
        invalid control/compute node count
    """
    logging.info("Creating deployment command")
    deploy_options = [env_file, 'network-environment.yaml']
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    nodes = inv['nodes']
    num_control = 0
    num_compute = 0
    for node in nodes:
        if 'profile:control' in node['capabilities']:
            num_control += 1
        elif 'profile:compute' in node['capabilities']:
            num_compute += 1
        else:
            # TODO(trozet) do we want to allow capabilities to not exist?
            logging.error("Every node must include a 'capabilities' key "
                          "tagged with either 'profile:control' or "
                          "'profile:compute'")
            raise ApexDeployException("Node missing capabilities "
                                      "key: {}".format(node))
    if num_control == 0 or num_compute == 0:
        # fixed message: was "compute nodes{}" (missing ': ' separator)
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control,
                                                     num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        # non-HA deployments only ever use a single controller
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd
155
156
def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    :raises ApexDeployException: if the source image file is missing
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"})
        logging.info("ZRPC and Quagga enabled")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            # the 'with' block closes the file; explicit close() was redundant
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        # SFC on plain OVS needs the NSH-enabled OVS build
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum downgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron with using OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})

    if sdn == 'opendaylight':
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            # swap in the requested ODL version and its puppet module
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    ds_opts['odl_version'])},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
            # (fix) ODL_NETVIRT_VPP_RPM is already an absolute path; the old
            # "/root/{}/*" template yielded a bogus "/root//root/...rpm/*"
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install {}".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")
266
267
def make_ssh_key():
    """
    Creates public and private ssh keys with 1024 bit RSA encryption
    :return: (private key, public key) as utf-8 strings; the private key is
        PEM/PKCS8, the public key is the OpenSSH body with the leading
        'ssh-rsa ' prefix stripped
    """
    # NOTE(review): 1024-bit RSA is weak by modern standards; kept as-is
    # for compatibility with existing deployments
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    # raw string so '\s' is a regex escape, not an invalid string escape
    pub_key = re.sub(r'ssh-rsa\s*', '', public_key.decode('utf-8'))
    return private_key.decode('utf-8'), pub_key
289
290
def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_ctrl_nic = tenant_nic_map['controller']['members'][0]
    tenant_comp_nic = tenant_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        if 'vpp' in ds_opts['performance']['Compute']:
            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
        else:
            perf_vpp_comp = None
        if 'vpp' in ds_opts['performance']['Controller']:
            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
        else:
            perf_vpp_ctrl = None

        # ovs
        if 'ovs' in ds_opts['performance']['Compute']:
            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
        else:
            perf_ovs_comp = None

        # kernel
        if 'kernel' in ds_opts['performance']['Compute']:
            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
        else:
            perf_kern_comp = None
    else:
        perf = False

    # Modify OPNFV environment
    # TODO: Change to build a dict and outputing yaml rather than parsing
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        output_line = line
        if 'CloudDomain' in line:
            output_line = "  CloudDomain: {}".format(ns['domain_name'])
        elif 'replace_private_key' in line:
            output_line = "      key: '{}'".format(private_key)
        elif 'replace_public_key' in line:
            output_line = "      key: '{}'".format(public_key)

        if ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts and ds_opts[
                'odl_vpp_routing_node'] != 'dvr':
            if 'opendaylight::vpp_routing_node' in line:
                # NOTE(review): the "${}.${}" template emits literal '$'
                # characters (e.g. "$node.$domain") -- confirm this is the
                # intended value format for this hiera key
                output_line = ("    opendaylight::vpp_routing_node: ${}.${}"
                               .format(ds_opts['odl_vpp_routing_node'],
                                       ns['domain_name']))
            elif 'ControllerExtraConfig' in line:
                # fixed misplaced quote: was "...tenant-interface]'" which
                # produced an invalid YAML list entry
                output_line = ("  ControllerExtraConfig:\n    "
                               "tripleo::profile::base::neutron::agents::"
                               "honeycomb::interface_role_mapping:"
                               " ['{}:tenant-interface']"
                               .format(tenant_ctrl_nic))
            elif 'NovaComputeExtraConfig' in line:
                # fixed misplaced quote, same as ControllerExtraConfig
                output_line = ("  NovaComputeExtraConfig:\n    "
                               "tripleo::profile::base::neutron::agents::"
                               "honeycomb::interface_role_mapping:"
                               " ['{}:tenant-interface']"
                               .format(tenant_comp_nic))
        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
                               format(tenant_ctrl_nic))

        if perf:
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    perf_line = ''
                    if 'main-core' in perf_opts:
                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
                                      .format(perf_opts['main-core']))
                    if 'corelist-workers' in perf_opts:
                        perf_line += ("\n    "
                                      "fdio::vpp_cpu_corelist_workers: '{}'"
                                      .format(perf_opts['corelist-workers']))
                    if perf_line:
                        output_line = ("  {}:{}".format(cfg, perf_line))

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if 'ComputeKernelArgs' in line and perf_kern_comp:
                kernel_args = ''
                for k, v in perf_kern_comp.items():
                    kernel_args += "{}={} ".format(k, v)
                if kernel_args:
                    output_line = "  ComputeKernelArgs: '{}'".\
                        format(kernel_args)
            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])

        # with inplace=True, stdout is redirected back into the file
        print(output_line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))

    # Modify Network environment
    for line in fileinput.input(net_env, inplace=True):
        line = line.strip('\n')
        if 'ComputeExtraConfigPre' in line and \
                ds_opts['dataplane'] == 'ovs_dpdk':
            print('  OS::TripleO::ComputeExtraConfigPre: '
                  './ovs-dpdk-preconfig.yaml')
        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
                'resource_registry' in line:
            print("resource_registry:\n"
                  "  OS::TripleO::NodeUserData: first-boot.yaml")
        elif perf and perf_kern_comp and \
                'NovaSchedulerDefaultFilters' in line:
            print("  NovaSchedulerDefaultFilters: 'RamFilter,"
                  "ComputeFilter,AvailabilityZoneFilter,"
                  "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
                  "NUMATopologyFilter'")
        else:
            print(line)

    logging.info("network-environment file written to {}".format(net_env))
434
435
def generate_ceph_key():
    """
    Generates a random cephx secret in the base64 keyring format.
    :return: base64-encoded key as bytes
    """
    secret = os.urandom(16)
    # header fields: struct type (1), creation time, nsec (0), key length
    hdr = struct.pack('<hiih', 1, int(time.time()), 0, len(secret))
    return base64.b64encode(hdr + secret)
440
441
def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir holding storage-environment.yaml
    :return: None
    :raises ApexDeployException: when storage-environment.yaml is missing
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    # rewrite the template in place, injecting fresh cluster credentials
    for tmpl_line in fileinput.input(storage_file, inplace=True):
        tmpl_line = tmpl_line.strip('\n')
        if 'CephClusterFSID' in tmpl_line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        else:
            for key_name in ('CephMonKey', 'CephAdminKey'):
                if key_name in tmpl_line:
                    print("  {}: {}".format(
                        key_name, generate_ceph_key().decode('utf-8')))
                    break
            else:
                print(tmpl_line)
    # optionally map the configured OSD device via hiera ExtraConfig
    if ds_opts.get('ceph_device'):
        with open(storage_file, 'a') as env_fh:
            env_fh.write('  ExtraConfig:\n')
            env_fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n"
                         .format(ds_opts['ceph_device']))
476
477
def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    external = 'external' in ns.enabled_network_list
    if external:
        net_config = ns['networks']['external'][0]
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        # no external network configured: fall back to the admin network
        # and reuse its introspection range as the allocation pool
        net_config = ns['networks']['admin']
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    compute_vlan = nic_config['compute']['vlan']
    # native vlan means a flat provider network; otherwise tag the segment
    if compute_vlan == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(compute_vlan)
    cmds = ["openstack network create external --project service "
            "--external --provider-network-type {} "
            "--provider-physical-network datacentre".format(ext_type)]
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds
518
519
def create_congress_cmds(overcloud_file):
    """
    Builds congress datasource creation arguments from an overcloudrc file.
    :param overcloud_file: path to the overcloudrc credentials file
    :return: list of datasource argument strings, one per driver
    :raises KeyError: when overcloudrc lacks a required OS_* variable
    """
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    creds = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        cfg_pairs = [
            "username={}".format(creds['OS_USERNAME']),
            "tenant_name={}".format(creds['OS_PROJECT_NAME']),
            "password={}".format(creds['OS_PASSWORD']),
            "auth_url={}".format(creds['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    ds_cfg = '--config ' + ' --config '.join(cfg_pairs)

    cmds = list()
    for driver in drivers:
        # doctor takes no credential config; every other driver does
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds