Merge "Fixes missing iso packages and idempotency of virt resources"
[apex.git] / apex / overcloud / overcloud_deploy.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import base64
import fileinput
import logging
import os
import re
import shutil
import uuid
import struct
import time

from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
from apex.virtual import virtual_utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
    crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as \
    crypto_default_backend


SDN_FILE_MAP = {
    'opendaylight': {
        'sfc': 'opendaylight_sfc.yaml',
        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
        'gluon': 'gluon.yaml',
        'vpp': {
            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
            'default': 'neutron-opendaylight-honeycomb.yaml'
        },
        'default': 'neutron-opendaylight.yaml',
    },
    'onos': {
        'sfc': 'neutron-onos-sfc.yaml',
        'default': 'neutron-onos.yaml'
    },
    'ovn': 'neutron-ml2-ovn.yaml',
    False: {
        'vpp': 'neutron-ml2-vpp.yaml',
        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
    }
}

OTHER_FILE_MAP = {
    'tacker': 'enable_tacker.yaml',
    'congress': 'enable_congress.yaml',
    'barometer': 'enable_barometer.yaml',
    'rt_kvm': 'enable_rt_kvm.yaml'
}

OVS_PERF_MAP = {
    'HostCpusList': 'dpdk_cores',
    'NeutronDpdkCoreList': 'pmd_cores',
    'NeutronDpdkSocketMemory': 'socket_memory',
    'NeutronDpdkMemoryChannels': 'memory_channels'
}

OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                      ".noarch.rpm"


def build_sdn_env_list(ds, sdn_map, env_list=None):
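    """
    Builds a list of SDN environment files to pass to overcloud deploy,
    based on the deploy options and the given SDN file map
    :param ds: deploy options (deploy_options section of deploy settings)
    :param sdn_map: mapping of SDN/feature keys to environment file names
    :param env_list: list to extend (used when recursing into nested maps)
    :return: list of environment file paths under the THT environment dir
    """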
    if env_list is None:
        env_list = list()
    for k, v in sdn_map.items():
        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
            if isinstance(v, dict):
                env_list.extend(build_sdn_env_list(ds, v))
            else:
                env_list.append(os.path.join(con.THT_ENV_DIR, v))
        elif isinstance(v, tuple):
            if ds[k] == v[0]:
                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
    if len(env_list) == 0:
        try:
            env_list.append(os.path.join(
                con.THT_ENV_DIR, sdn_map[ds['sdn_controller']]['default']))
        except KeyError:
            logging.warning("Unable to find default file for SDN")

    return env_list


def create_deploy_cmd(ds, ns, inv, tmp_dir,
                      virtual, env_file='opnfv-environment.yaml'):
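    """
    Creates the overcloud deploy command and writes it to tmp_dir
    :param ds: deploy settings
    :param ns: network settings
    :param inv: deployment inventory (node definitions)
    :param tmp_dir: dir to store the generated deploy_command file
    :param virtual: bool indicating a virtual deployment
    :param env_file: opnfv environment file to include
    :return: deploy command string
    """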
    logging.info("Creating deployment command")
    deploy_options = [env_file, 'network-environment.yaml']
    ds_opts = ds['deploy_options']
    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)

    # TODO(trozet): make sure rt kvm file is in tht dir
    for k, v in OTHER_FILE_MAP.items():
        if k in ds_opts and ds_opts[k]:
            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))

    if ds_opts['ceph']:
        prep_storage_env(ds, tmp_dir)
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'storage-environment.yaml'))
    if ds['global_params']['ha_enabled']:
        deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                           'puppet-pacemaker.yaml'))

    if virtual:
        deploy_options.append('virtual-environment.yaml')
    else:
        deploy_options.append('baremetal-environment.yaml')

    nodes = inv['nodes']
    num_control = 0
    num_compute = 0
    for node in nodes:
        if node['capabilities'] == 'profile:control':
            num_control += 1
        elif node['capabilities'] == 'profile:compute':
            num_compute += 1
        else:
            # TODO(trozet) do we want to allow capabilities to not exist?
            logging.error("Every node must include a 'capabilities' key "
                          "tagged with either 'profile:control' or "
                          "'profile:compute'")
            raise ApexDeployException("Node missing capabilities "
                                      "key: {}".format(node))
    if num_control == 0 or num_compute == 0:
        logging.error("Detected 0 control or compute nodes.  Control nodes: "
                      "{}, compute nodes: {}".format(num_control, num_compute))
        raise ApexDeployException("Invalid number of control or computes")
    elif num_control > 1 and not ds['global_params']['ha_enabled']:
        num_control = 1
    cmd = "openstack overcloud deploy --templates --timeout {} " \
          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
    # build cmd env args
    for option in deploy_options:
        cmd += " -e {}".format(option)
    cmd += " --ntp-server {}".format(ns['ntp'][0])
    cmd += " --control-scale {}".format(num_control)
    cmd += " --compute-scale {}".format(num_compute)
    cmd += ' --control-flavor control --compute-flavor compute'
    logging.info("Deploy command set: {}".format(cmd))

    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
        fh.write(cmd)
    return cmd


def prep_image(ds, img, tmp_dir, root_pw=None):
    """
    Locates sdn image and preps for deployment.
    :param ds: deploy settings
    :param img: sdn image
    :param tmp_dir: dir to store modified sdn image
    :param root_pw: password to configure for overcloud image
    :return: None
    """
    # TODO(trozet): Come up with a better way to organize this logic in this
    # function
    logging.info("Preparing image: {} for deployment".format(img))
    if not os.path.isfile(img):
        logging.error("Missing SDN image {}".format(img))
        raise ApexDeployException("Missing SDN image file: {}".format(img))

    ds_opts = ds['deploy_options']
    virt_cmds = list()
    sdn = ds_opts['sdn_controller']
    # we need this due to rhbz #1436021
    # fixed in systemd-219-37.el7
    if sdn is not False:
        logging.info("Neutron openvswitch-agent disabled")
        virt_cmds.extend([{
            con.VIRT_RUN_CMD:
                "rm -f /etc/systemd/system/multi-user.target.wants/"
                "neutron-openvswitch-agent.service"},
            {
            con.VIRT_RUN_CMD:
                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
                ".service"
        }])

    if ds_opts['vpn']:
        virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"})
        logging.info("ZRPC and Quagga enabled")

    dataplane = ds_opts['dataplane']
    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
        logging.info("Enabling kernel modules for dpdk")
        # file to module mapping
        uio_types = {
            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
        }
        for mod_file, mod in uio_types.items():
            with open(mod_file, 'w') as fh:
                fh.write('#!/bin/bash\n')
                fh.write('exec /sbin/modprobe {}'.format(mod))

            virt_cmds.extend([
                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
                    mod_file)},
                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
                                   "{}".format(os.path.basename(mod_file))}
            ])
    if root_pw:
        pw_op = "password:{}".format(root_pw)
        virt_cmds.append({con.VIRT_PW: pw_op})
    if ds_opts['sfc'] and dataplane == 'ovs':
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "yum -y install "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_KMOD_RPM)},
            {con.VIRT_RUN_CMD: "yum upgrade -y "
                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                               "{}".format(OVS_NSH_RPM)}
        ])
    if dataplane == 'fdio':
        # Patch neutron to use OVS external interface for router
        # and add generic linux NS interface driver
        virt_cmds.append(
            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
                               "-p1 < neutron-patch-NSDriver.patch"})

    if sdn == 'opendaylight':
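        # either swap in the RPMs/puppet module staged in the image for a
        # non-default ODL version, or install the netvirt-vpp snapshot RPM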
        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                    con.DEFAULT_ODL_VERSION)},
                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                   "/root/puppet-opendaylight-"
                                   "{}.tar.gz".format(ds_opts['odl_version'])}
            ])
        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
            virt_cmds.extend([
                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
                {con.VIRT_RUN_CMD: "yum -y install {}".format(
                    ODL_NETVIRT_VPP_RPM)}
            ])

    if sdn == 'ovn':
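        # replace the installed openvswitch packages with the versions staged
        # under /root/ovs28 in the image (update, then downgrade, so either
        # direction is covered)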
        virt_cmds.extend([
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
                               "*openvswitch*"},
            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
                               "*openvswitch*"}
        ])

    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
    shutil.copyfile(img, tmp_oc_image)
    logging.debug("Temporary overcloud image stored as: {}".format(
        tmp_oc_image))
    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
    logging.info("Overcloud image customization complete")


def make_ssh_key():
    """
    Creates a public/private ssh key pair using 1024 bit RSA
    :return: private, public key
    """
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=1024
    )

    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    pub_key = re.sub(r'ssh-rsa\s*', '', public_key.decode('utf-8'))
    return private_key.decode('utf-8'), pub_key


def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
    """
    Creates modified opnfv/network environments for deployment
    :param ds: deploy settings
    :param ns: network settings
    :param opnfv_env: file path for opnfv-environment file
    :param net_env: file path for network-environment file
    :param tmp_dir: Apex tmp dir
    :return: None
    """

    logging.info("Preparing opnfv-environment and network-environment files")
    ds_opts = ds['deploy_options']
    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
    shutil.copyfile(opnfv_env, tmp_opnfv_env)
    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
    tenant_ctrl_nic = tenant_nic_map['controller']['members'][0]
    tenant_comp_nic = tenant_nic_map['compute']['members'][0]

    # SSH keys
    private_key, public_key = make_ssh_key()

    # Make easier/faster variables to index in the file editor
    if 'performance' in ds_opts:
        perf = True
        # vpp
        perf_vpp_comp = ds_opts['performance']['Compute'].get('vpp')
        perf_vpp_ctrl = ds_opts['performance']['Controller'].get('vpp')
        # ovs
        perf_ovs_comp = ds_opts['performance']['Compute'].get('ovs')
        # kernel
        perf_kern_comp = ds_opts['performance']['Compute'].get('kernel')
    else:
        perf = False

    # Modify OPNFV environment
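    # fileinput with inplace=True redirects stdout into the file, so the
    # print() calls below rewrite each line of the copied environment file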
    for line in fileinput.input(tmp_opnfv_env, inplace=True):
        line = line.strip('\n')
        if 'CloudDomain' in line:
            print("  CloudDomain: {}".format(ns['domain_name']))
        elif ds_opts['sdn_controller'] == 'opendaylight' and \
                'odl_vpp_routing_node' in ds_opts and \
                ds_opts['odl_vpp_routing_node'] != 'dvr':
            if 'opendaylight::vpp_routing_node' in line:
                print("    opendaylight::vpp_routing_node: ${}.${}".format(
                    ds_opts['odl_vpp_routing_node'], ns['domain_name']))
            elif 'ControllerExtraConfig' in line:
                print("  ControllerExtraConfig:\n    "
                      "tripleo::profile::base::neutron::agents::honeycomb"
                      "::interface_role_mapping: ['{}:tenant-"
                      "interface']".format(tenant_ctrl_nic))
            elif 'NovaComputeExtraConfig' in line:
                print("  NovaComputeExtraConfig:\n    "
                      "tripleo::profile::base::neutron::agents::honeycomb"
                      "::interface_role_mapping: ['{}:tenant-"
                      "interface']".format(tenant_comp_nic))
            else:
                print(line)

        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
            if 'NeutronVPPAgentPhysnets' in line:
                print("  NeutronVPPAgentPhysnets: 'datacentre:{}'".format(
                    tenant_ctrl_nic))
            else:
                print(line)
        elif perf:
            line_printed = False
            for role in 'NovaCompute', 'Controller':
                if role == 'NovaCompute':
                    perf_opts = perf_vpp_comp
                else:
                    perf_opts = perf_vpp_ctrl
                cfg = "{}ExtraConfig".format(role)
                if cfg in line and perf_opts:
                    if 'main-core' in perf_opts:
                        print("  {}:\n"
                              "    fdio::vpp_cpu_main_core: '{}'"
                              "".format(cfg, perf_opts['main-core']))
                        line_printed = True
                        break
                    elif 'corelist-workers' in perf_opts:
                        print("  {}:\n"
                              "    fdio::vpp_cpu_corelist_workers: '{}'"
                              "".format(cfg, perf_opts['corelist-workers']))
                        line_printed = True
                        break

            # kernel args
            # (FIXME) use compute's kernel settings for all nodes for now.
            if 'ComputeKernelArgs' in line and perf_kern_comp:
                kernel_args = ''
                for k, v in perf_kern_comp.items():
                    kernel_args += "{}={} ".format(k, v)
                if kernel_args:
                    print("  ComputeKernelArgs: '{}'".format(kernel_args))
                    line_printed = True
            elif ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                for k, v in OVS_PERF_MAP.items():
                    if k in line and v in perf_ovs_comp:
                        print("  {}: {}".format(k, perf_ovs_comp[v]))
                        line_printed = True

            if not line_printed:
                print(line)
        elif 'replace_private_key' in line:
            print("      key: '{}'".format(private_key))
        elif 'replace_public_key' in line:
            print("      key: '{}'".format(public_key))
        else:
            print(line)

    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))

    # Modify Network environment
419     # Modify Network environment
420     for line in fileinput.input(net_env, inplace=True):
421         line = line.strip('\n')
422         if ds_opts['dataplane'] == 'ovs_dpdk':
423             if 'ComputeExtraConfigPre' in line:
424                 print('  OS::TripleO::ComputeExtraConfigPre: '
425                       './ovs-dpdk-preconfig.yaml')
426             else:
427                 print(line)
428         elif perf and perf_kern_comp:
429             if 'resource_registry' in line:
430                 print("resource_registry:\n"
431                       "  OS::TripleO::NodeUserData: first-boot.yaml")
432             elif 'NovaSchedulerDefaultFilters' in line:
433                 print("  NovaSchedulerDefaultFilters: 'RamFilter,"
434                       "ComputeFilter,AvailabilityZoneFilter,"
435                       "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
436                       "NUMATopologyFilter'")
437             else:
438                 print(line)
439         else:
440             print(line)
441
442     logging.info("network-environment file written to {}".format(net_env))
443
444
445 def generate_ceph_key():
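    """
    Generates a random cephx key: 16 random bytes prefixed with a packed
    header (key type, creation time, key length), base64 encoded
    :return: base64 encoded ceph key (bytes)
    """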
    key = os.urandom(16)
    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
    return base64.b64encode(header + key)


def prep_storage_env(ds, tmp_dir):
    """
    Creates storage environment file for deployment.  Source file is copied by
    undercloud playbook to host.
    :param ds: deploy settings
    :param tmp_dir: Apex tmp dir
    :return: None
    """
    ds_opts = ds['deploy_options']
    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
    if not os.path.isfile(storage_file):
        logging.error("storage-environment file is not in tmp directory: {}. "
                      "Check if file was copied from "
                      "undercloud".format(tmp_dir))
        raise ApexDeployException("storage-environment file not copied from "
                                  "undercloud")
    for line in fileinput.input(storage_file, inplace=True):
        line = line.strip('\n')
        if 'CephClusterFSID' in line:
            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
        elif 'CephMonKey' in line:
            print("  CephMonKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        elif 'CephAdminKey' in line:
            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                'utf-8')))
        else:
            print(line)
    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
        with open(storage_file, 'a') as fh:
            fh.write('  ExtraConfig:\n')
            fh.write("    ceph::profile::params::osds: {{{}: {{}}}}\n".format(
                ds_opts['ceph_device']
            ))


def external_network_cmds(ns):
    """
    Generates external network openstack commands
    :param ns: network settings
    :return: list of commands to configure external network
    """
    if 'external' in ns.enabled_network_list:
        net_config = ns['networks']['external'][0]
        external = True
        pool_start, pool_end = net_config['floating_ip_range']
    else:
        net_config = ns['networks']['admin']
        external = False
        pool_start, pool_end = ns['apex']['networks']['admin'][
            'introspection_range']
    nic_config = net_config['nic_mapping']
    gateway = net_config['gateway']
    cmds = list()
    # create network command
    if nic_config['compute']['vlan'] == 'native':
        ext_type = 'flat'
    else:
        ext_type = "vlan --provider-segment {}".format(nic_config[
                                                       'compute']['vlan'])
    cmds.append("openstack network create external --project service "
                "--external --provider-network-type {} "
                "--provider-physical-network datacentre".format(ext_type))
    # create subnet command
    cidr = net_config['cidr']
    subnet_cmd = "openstack subnet create external-subnet --project " \
                 "service --network external --no-dhcp --gateway {} " \
                 "--allocation-pool start={},end={} --subnet-range " \
                 "{}".format(gateway, pool_start, pool_end, str(cidr))
    if external and cidr.version == 6:
        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
                      '--ipv6-address-mode slaac'
    cmds.append(subnet_cmd)
    logging.debug("Neutron external network commands determined "
                  "as: {}".format(cmds))
    return cmds


def create_congress_cmds(overcloud_file):
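    """
    Creates congress datasource commands for each supported driver using
    credentials parsed from the overcloudrc file
    :param overcloud_file: path to overcloudrc file
    :return: list of congress datasource command strings
    """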
    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
    logging.info("Creating congress commands")
    try:
        ds_cfg = [
            "username={}".format(overcloudrc['OS_USERNAME']),
            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
            "password={}".format(overcloudrc['OS_PASSWORD']),
            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
        ]
    except KeyError:
        logging.error("Unable to find all keys required for congress in "
                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
                      "file: {}".format(overcloud_file))
        raise
    cmds = list()
    ds_cfg = '--config ' + ' --config '.join(ds_cfg)

    for driver in drivers:
        if driver == 'doctor':
            cmd = "{} \"{}\"".format(driver, driver)
        else:
            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
        if driver == 'nova':
            cmd += ' --config api_version="2.34"'
        logging.debug("Congress command created: {}".format(cmd))
        cmds.append(cmd)
    return cmds