Bring in aarch64 support in apex
apex/deploy.py
1 #!/usr/bin/env python
2
3 ##############################################################################
4 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
5 #
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 ##############################################################################
11
12 import argparse
13 import git
14 import json
15 import logging
16 import os
17 import platform
18 import pprint
19 import shutil
20 import sys
21 import tempfile
22 import yaml
23
24 import apex.virtual.configure_vm as vm_lib
25 import apex.virtual.utils as virt_utils
26 import apex.builders.common_builder as c_builder
27 import apex.builders.overcloud_builder as oc_builder
28 import apex.builders.undercloud_builder as uc_builder
29 from apex import DeploySettings
30 from apex import Inventory
31 from apex import NetworkEnvironment
32 from apex import NetworkSettings
33 from apex.deployment.snapshot import SnapshotDeployment
34 from apex.common import utils
35 from apex.common import constants
36 from apex.common import parsers
37 from apex.common.exceptions import ApexDeployException
38 from apex.deployment.tripleo import ApexDeployment
39 from apex.network import jumphost
40 from apex.network import network_data
41 from apex.undercloud import undercloud as uc_lib
42 from apex.overcloud import config as oc_cfg
43 from apex.overcloud import deploy as oc_deploy
44
45 APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
46 SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
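# Overcloud kernel/ramdisk and ironic agent artifacts that are copied into
# the undercloud's /home/stack via ansible (see the copy loop in main())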
47 UC_DISK_FILES = [
48     'overcloud-full.vmlinuz',
49     'overcloud-full.initrd',
50     'ironic-python-agent.initramfs',
51     'ironic-python-agent.kernel'
52 ]
53
54
55 def validate_cross_settings(deploy_settings, net_settings, inventory):
56     """
57     Used to validate compatibility across settings file.
58     :param deploy_settings: parsed settings for deployment
59     :param net_settings: parsed settings for network
60     :param inventory: parsed inventory file
61     :return: None
62     """
63
64     if deploy_settings['deploy_options']['dataplane'] != 'ovs' and 'tenant' \
65             not in net_settings.enabled_network_list:
66         raise ApexDeployException("Setting a DPDK based dataplane requires"
67                                   "a dedicated NIC for tenant network")
68
69     if 'odl_vpp_routing_node' in deploy_settings['deploy_options']:
70         if deploy_settings['deploy_options']['dataplane'] != 'fdio':
71             raise ApexDeployException("odl_vpp_routing_node should only be "
72                                       "set when dataplane is set to fdio")
73         if deploy_settings['deploy_options'].get('dvr') is True:
74             raise ApexDeployException("odl_vpp_routing_node should only be "
75                                       "set when dvr is not enabled")
76
77     # TODO(trozet): add more checks here like RAM for ODL, etc
78     # check if odl_vpp_netvirt is true and vpp is set
79     # Check if fdio and nosdn:
80     # tenant_nic_mapping_controller_members" ==
81     # "$tenant_nic_mapping_compute_members
82
83
84 def build_vms(inventory, network_settings,
85               template_dir='/usr/share/opnfv-apex'):
86     """
87     Creates VMs and configures vbmc and host
88     :param inventory: parsed inventory of overcloud nodes
89     :param network_settings: parsed network settings
90     :return: None
91     """
92
93     for idx, node in enumerate(inventory['nodes']):
94         name = 'baremetal{}'.format(idx)
95         volume = name + ".qcow2"
96         volume_path = os.path.join(constants.LIBVIRT_VOLUME_PATH, volume)
97         # TODO(trozet): add error checking
98         vm_lib.create_vm(
99             name, volume_path,
100             baremetal_interfaces=network_settings.enabled_network_list,
101             memory=node['memory'], cpus=node['cpu'],
102             macs=node['mac'],
103             template_dir=template_dir)
104         virt_utils.host_setup({name: node['pm_port']})
105
106
107 def create_deploy_parser():
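    # Builds the argparse parser for the deploy entry point.
    # Example invocation (file names below are illustrative only):
    #   deploy.py -d os-nosdn-nofeature-noha.yaml -n network_settings.yaml -v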
108     deploy_parser = argparse.ArgumentParser()
109     deploy_parser.add_argument('--debug', action='store_true', default=False,
110                                help="Turn on debug messages")
111     deploy_parser.add_argument('-l', '--log-file',
112                                default='./apex_deploy.log',
113                                dest='log_file', help="Log file to log to")
114     deploy_parser.add_argument('-d', '--deploy-settings',
115                                dest='deploy_settings_file',
116                                required=True,
117                                help='File which contains Apex deploy settings')
118     deploy_parser.add_argument('-n', '--network-settings',
119                                dest='network_settings_file',
120                                required=False,
121                                help='File which contains Apex network '
122                                     'settings')
123     deploy_parser.add_argument('-i', '--inventory-file',
124                                dest='inventory_file',
125                                default=None,
126                                help='Inventory file which contains POD '
127                                     'definition')
128     deploy_parser.add_argument('-e', '--environment-file',
129                                dest='env_file',
130                                default='opnfv-environment.yaml',
131                                help='Provide alternate base env file located '
132                                     'in deploy_dir')
133     deploy_parser.add_argument('-v', '--virtual', action='store_true',
134                                default=False,
135                                dest='virtual',
136                                help='Enable virtual deployment')
137     deploy_parser.add_argument('--interactive', action='store_true',
138                                default=False,
139                                help='Enable interactive deployment mode which '
140                                     'requires user to confirm steps of '
141                                     'deployment')
142     deploy_parser.add_argument('--virtual-computes',
143                                dest='virt_compute_nodes',
144                                default=1,
145                                type=int,
146                                help='Number of Virtual Compute nodes to create'
147                                     ' and use during deployment (defaults to 1'
148                                     ' for noha and 2 for ha)')
149     deploy_parser.add_argument('--virtual-cpus',
150                                dest='virt_cpus',
151                                default=4,
152                                type=int,
153                                help='Number of CPUs to use per Overcloud VM in'
154                                     ' a virtual deployment (defaults to 4)')
155     deploy_parser.add_argument('--virtual-default-ram',
156                                dest='virt_default_ram',
157                                default=8,
158                                type=int,
159                                help='Amount of default RAM to use per '
160                                     'Overcloud VM in GB (defaults to 8).')
161     deploy_parser.add_argument('--virtual-compute-ram',
162                                dest='virt_compute_ram',
163                                default=None,
164                                type=int,
165                                help='Amount of RAM to use per Overcloud '
166                                     'Compute VM in GB (defaults to 8). '
167                                     'Overrides --virtual-default-ram arg for '
168                                     'computes')
169     deploy_parser.add_argument('--deploy-dir',
170                                default='/usr/share/opnfv-apex',
171                                help='Directory to deploy from which contains '
172                                     'base config files for deployment')
173     deploy_parser.add_argument('--image-dir',
174                                default='/var/opt/opnfv/images',
175                                help='Directory which contains '
176                                     'base disk images for deployment')
177     deploy_parser.add_argument('--lib-dir',
178                                default='/usr/share/opnfv-apex',
179                                help='Directory path for apex ansible '
180                                     'and third party libs')
181     deploy_parser.add_argument('-s', '--snapshot', action='store_true',
182                                default=False,
183                                help='Use snapshots for deployment')
184     deploy_parser.add_argument('--snap-cache', dest='snap_cache',
185                                default="{}/snap_cache".format(
186                                    os.path.expanduser('~')),
187                                help='Local directory to cache snapshot '
188                                     'artifacts. Defaults to $HOME/snap_cache')
189     deploy_parser.add_argument('--upstream', action='store_true',
190                                default=True,
191                                help='Force deployment to use upstream '
192                                     'artifacts. This option is now '
193                                     'deprecated and only upstream '
194                                     'deployments are supported.')
195     deploy_parser.add_argument('--no-fetch', action='store_true',
196                                default=False,
197                                help='Ignore fetching latest upstream and '
198                                     'use what is in cache')
199     deploy_parser.add_argument('-p', '--patches',
200                                default='/etc/opnfv-apex/common-patches.yaml',
201                                dest='patches_file',
202                                help='File to include for common patches '
203                                     'which apply to all deployment scenarios')
204     return deploy_parser
205
206
207 def validate_deploy_args(args):
208     """
209     Validates arguments for deploy
210     :param args: parsed deploy arguments
211     :return: None
212     """
213
214     logging.debug('Validating arguments for deployment')
215     if args.snapshot:
216         logging.debug('Skipping inventory validation as it is not '
217                       'applicable to snapshot deployments')
218     elif args.virtual and args.inventory_file is not None:
219         logging.error("Virtual enabled but inventory file also given")
220         raise ApexDeployException('You should not specify an inventory file '
221                                   'with virtual deployments')
222     elif args.virtual:
223         args.inventory_file = os.path.join(APEX_TEMP_DIR,
224                                            'inventory-virt.yaml')
225     elif not os.path.isfile(args.inventory_file):
226         logging.error("Specified inventory file does not exist: {}".format(
227             args.inventory_file))
228         raise ApexDeployException('Specified inventory file does not exist')
229
230     for settings_file in (args.deploy_settings_file,
231                           args.network_settings_file):
232         if settings_file == args.network_settings_file and args.snapshot:
233             continue
234         if os.path.isfile(settings_file) is False:
235             logging.error("Specified settings file does not "
236                           "exist: {}".format(settings_file))
237             raise ApexDeployException('Specified settings file does not '
238                                       'exist: {}'.format(settings_file))
239
240
241 def main():
242     parser = create_deploy_parser()
243     args = parser.parse_args(sys.argv[1:])
244     # FIXME (trozet): this is only needed as a workaround for CI.  Remove
245     # when CI is changed
246     if os.getenv('IMAGES', False):
247         args.image_dir = os.getenv('IMAGES')
248     if args.debug:
249         log_level = logging.DEBUG
250     else:
251         log_level = logging.INFO
252     os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
253     formatter = '%(asctime)s %(levelname)s: %(message)s'
254     logging.basicConfig(filename=args.log_file,
255                         format=formatter,
256                         datefmt='%m/%d/%Y %I:%M:%S %p',
257                         level=log_level)
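    # Mirror log messages to the console at the same level as the log file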
258     console = logging.StreamHandler()
259     console.setLevel(log_level)
260     console.setFormatter(logging.Formatter(formatter))
261     logging.getLogger('').addHandler(console)
262     utils.install_ansible()
263     validate_deploy_args(args)
264     # Parse all settings
265     deploy_settings = DeploySettings(args.deploy_settings_file)
266     logging.info("Deploy settings are:\n {}".format(pprint.pformat(
267         deploy_settings)))
268
269     if not args.snapshot:
270         net_settings = NetworkSettings(args.network_settings_file)
271         logging.info("Network settings are:\n {}".format(pprint.pformat(
272             net_settings)))
273         os_version = deploy_settings['deploy_options']['os_version']
274         net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
275         net_env = NetworkEnvironment(net_settings, net_env_file,
276                                      os_version=os_version)
277         net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
278         utils.dump_yaml(dict(net_env), net_env_target)
279
280         # get global deploy params
281         ha_enabled = deploy_settings['global_params']['ha_enabled']
282         introspect = deploy_settings['global_params'].get('introspect', True)
283         net_list = net_settings.enabled_network_list
284         if args.virtual:
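            # Pick per-VM RAM/vCPUs, enforcing scenario and aarch64 minimums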
285             if args.virt_compute_ram is None:
286                 compute_ram = args.virt_default_ram
287             else:
288                 compute_ram = args.virt_compute_ram
289             if (deploy_settings['deploy_options']['sdn_controller'] ==
290                     'opendaylight' and args.virt_default_ram < 12):
291                 control_ram = 12
292                 logging.warning('RAM per controller is too low.  OpenDaylight '
293                                 'requires at least 12GB per controller.')
294                 logging.info('Increasing RAM per controller to 12GB')
295             elif args.virt_default_ram < 10:
296                 if platform.machine() == 'aarch64':
297                     control_ram = 16
298                     logging.warning('RAM per controller is too low for '
299                                     'aarch64; 16GB is required')
300                     logging.info('Increasing RAM per controller to 16GB')
301                 else:
302                     control_ram = 10
303                     logging.warning('RAM per controller is too low.  nosdn '
304                                     'requires at least 10GB per controller.')
305                     logging.info('Increasing RAM per controller to 10GB')
306             else:
307                 control_ram = args.virt_default_ram
308             if platform.machine() == 'aarch64' and args.virt_cpus < 16:
309                 vcpus = 16
310                 logging.warning('aarch64 requires at least 16 vCPUs per '
311                                 'target VM. Increasing to 16.')
312             else:
313                 vcpus = args.virt_cpus
314             if ha_enabled and args.virt_compute_nodes < 2:
315                 logging.debug(
316                     'HA enabled, bumping number of compute nodes to 2')
317                 args.virt_compute_nodes = 2
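            # Generate a virtual inventory matching the requested node sizing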
318             virt_utils.generate_inventory(args.inventory_file, ha_enabled,
319                                           num_computes=args.virt_compute_nodes,
320                                           controller_ram=control_ram * 1024,
321                                           compute_ram=compute_ram * 1024,
322                                           vcpus=vcpus
323                                           )
324         inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
325         logging.info("Inventory is:\n {}".format(pprint.pformat(
326             inventory)))
327
328         validate_cross_settings(deploy_settings, net_settings, inventory)
329     else:
330         # only one network with snapshots
331         net_list = [constants.ADMIN_NETWORK]
332
333     ds_opts = deploy_settings['deploy_options']
334     ansible_args = {
335         'virsh_enabled_networks': net_list,
336         'snapshot': args.snapshot
337     }
338     utils.run_ansible(ansible_args,
339                       os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
340                                    'deploy_dependencies.yml'))
341     if args.snapshot:
342         # Start snapshot Deployment
343         logging.info('Executing Snapshot Deployment...')
344         SnapshotDeployment(deploy_settings=deploy_settings,
345                            snap_cache_dir=args.snap_cache,
346                            fetch=not args.no_fetch,
347                            all_in_one=not bool(args.virt_compute_nodes))
348     else:
349         # Start Standard TripleO Deployment
350         deployment = ApexDeployment(deploy_settings, args.patches_file,
351                                     args.deploy_settings_file)
352         # TODO (trozet): add logic back from:
353         # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
354         uc_external = False
355         if 'external' in net_settings.enabled_network_list:
356             uc_external = True
357         if args.virtual:
358             # create all overcloud VMs
359             build_vms(inventory, net_settings, args.deploy_dir)
360         else:
361             # Attach interfaces to jumphost for baremetal deployment
362             jump_networks = ['admin']
363             if uc_external:
364                 jump_networks.append('external')
365             for network in jump_networks:
366                 if network == 'external':
367                     # TODO(trozet): enable vlan secondary external networks
368                     iface = net_settings['networks'][network][0][
369                         'installer_vm']['members'][0]
370                 else:
371                     iface = net_settings['networks'][network]['installer_vm'][
372                         'members'][0]
373                 bridge = "br-{}".format(network)
374                 jumphost.attach_interface_to_ovs(bridge, iface, network)
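        # Dump the parsed inventory as instackenv.json for undercloud node
        # registration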
375         instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
376         with open(instackenv_json, 'w') as fh:
377             json.dump(inventory, fh)
378
379         # Create and configure undercloud
380         if args.debug:
381             root_pw = constants.DEBUG_OVERCLOUD_PW
382         else:
383             root_pw = None
384
385         if not args.upstream:
386             logging.warning("Using upstream is now required for Apex. "
387                             "Forcing upstream to true")
388         if os_version == 'master':
389             branch = 'master'
390         else:
391             branch = "stable/{}".format(os_version)
392
393         logging.info("Deploying with upstream artifacts for OpenStack "
394                      "{}".format(os_version))
395         args.image_dir = os.path.join(args.image_dir, os_version)
396         upstream_url = constants.UPSTREAM_RDO.replace(
397             constants.DEFAULT_OS_VERSION, os_version)
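        # Fetch (or reuse cached) upstream overcloud and ironic-python-agent
        # tarballs for the target OpenStack release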
398         upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
399         utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
400                                         upstream_targets,
401                                         fetch=not args.no_fetch)
402         # Copy ironic files and overcloud ramdisk and kernel into temp dir
403         # to be copied by ansible into undercloud /home/stack
404         # Note the overcloud disk does not need to be copied here as it will
405         # be modified and copied later
406         for tmp_file in UC_DISK_FILES:
407             shutil.copyfile(os.path.join(args.image_dir, tmp_file),
408                             os.path.join(APEX_TEMP_DIR, tmp_file))
409         sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
410         # copy undercloud so we don't taint upstream fetch
411         uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
412         uc_fetch_img = sdn_image
413         shutil.copyfile(uc_fetch_img, uc_image)
414         # prep undercloud with required packages
415         if platform.machine() != 'aarch64':
416             uc_builder.update_repos(image=uc_image,
417                                     branch=branch.replace('stable/', ''))
418         uc_builder.add_upstream_packages(uc_image)
419         uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
420         # add patches from upstream to undercloud and overcloud
421         logging.info('Adding patches to undercloud')
422         patches = deployment.determine_patches()
423         c_builder.add_upstream_patches(patches['undercloud'], uc_image,
424                                        APEX_TEMP_DIR, branch)
425
426         # Create/Start Undercloud VM
427         undercloud = uc_lib.Undercloud(args.image_dir,
428                                        args.deploy_dir,
429                                        root_pw=root_pw,
430                                        external_network=uc_external,
431                                        image_name=os.path.basename(uc_image),
432                                        os_version=os_version)
433         undercloud.start()
434         undercloud_admin_ip = net_settings['networks'][
435             constants.ADMIN_NETWORK]['installer_vm']['ip']
436
437         if ds_opts['containers']:
438             tag = constants.DOCKER_TAG
439         else:
440             tag = None
441
442         # Generate nic templates
443         for role in 'compute', 'controller':
444             oc_cfg.create_nic_template(net_settings, deploy_settings, role,
445                                        args.deploy_dir, APEX_TEMP_DIR)
446         # Prepare/Upload docker images
447         docker_env = 'containers-prepare-parameter.yaml'
448         shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
449                         os.path.join(APEX_TEMP_DIR, docker_env))
450         # Upload extra ansible.cfg (aarch64 only)
451         if platform.machine() == 'aarch64':
452             ansible_env = 'ansible.cfg'
453             shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
454                             os.path.join(APEX_TEMP_DIR, ansible_env))
455
456         c_builder.prepare_container_images(
457             os.path.join(APEX_TEMP_DIR, docker_env),
458             branch=branch.replace('stable/', ''),
459             neutron_driver=c_builder.get_neutron_driver(ds_opts)
460         )
461         # Install Undercloud
462         undercloud.configure(net_settings, deploy_settings,
463                              os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
464                                           'configure_undercloud.yml'),
465                              APEX_TEMP_DIR, virtual_oc=args.virtual)
466
467         # Prepare overcloud-full.qcow2
468         logging.info("Preparing Overcloud for deployment...")
469         if os_version != 'ocata':
470             net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
471             net_data = network_data.create_network_data(net_settings,
472                                                         net_data_file)
473         else:
474             net_data = False
475
476         shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
477                         os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))
478
479         # TODO(trozet): Either fix opnfv env or default to use upstream env
480         if args.env_file == 'opnfv-environment.yaml':
481             # Override the env_file if it is defaulted to opnfv
482             # opnfv env file will not work with upstream
483             args.env_file = 'upstream-environment.yaml'
484         opnfv_env = os.path.join(args.deploy_dir, args.env_file)
485         oc_deploy.prep_env(deploy_settings, net_settings, inventory,
486                            opnfv_env, net_env_target, APEX_TEMP_DIR)
487         if not args.virtual:
488             oc_deploy.LOOP_DEVICE_SIZE = "50G"
489         patched_containers = oc_deploy.prep_image(
490             deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
491             root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
492
493         oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
494                                     APEX_TEMP_DIR, args.virtual,
495                                     os.path.basename(opnfv_env),
496                                     net_data=net_data)
497         # Prepare undercloud with containers
498         docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
499                                        'prepare_overcloud_containers.yml')
500         if ds_opts['containers']:
501             logging.info("Preparing Undercloud with Docker containers")
502             sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
503             sdn_env_files = str()
504             for sdn_file in sdn_env:
505                 sdn_env_files += " -e {}".format(sdn_file)
506             if patched_containers:
507                 oc_builder.archive_docker_patches(APEX_TEMP_DIR)
508             container_vars = dict()
509             container_vars['apex_temp_dir'] = APEX_TEMP_DIR
510             container_vars['patched_docker_services'] = list(
511                 patched_containers)
512             container_vars['container_tag'] = constants.DOCKER_TAG
513             container_vars['stackrc'] = 'source /home/stack/stackrc'
514             container_vars['sdn'] = ds_opts['sdn_controller']
515             container_vars['undercloud_ip'] = undercloud_admin_ip
516             container_vars['os_version'] = os_version
517             container_vars['aarch64'] = platform.machine() == 'aarch64'
518             container_vars['sdn_env_file'] = sdn_env_files
519             try:
520                 utils.run_ansible(container_vars, docker_playbook,
521                                   host=undercloud.ip, user='stack',
522                                   tmp_dir=APEX_TEMP_DIR)
523                 logging.info("Container preparation complete")
524             except Exception:
525                 logging.error("Unable to complete container prep on "
526                               "Undercloud")
527                 for tmp_file in UC_DISK_FILES:
528                     os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
529                 os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
530                 raise
531
532         deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
533                                        'deploy_overcloud.yml')
534         virt_env = 'virtual-environment.yaml'
535         bm_env = 'baremetal-environment.yaml'
536         k8s_env = 'kubernetes-environment.yaml'
537         for p_env in virt_env, bm_env, k8s_env:
538             shutil.copyfile(os.path.join(args.deploy_dir, p_env),
539                             os.path.join(APEX_TEMP_DIR, p_env))
540
541         # Start Overcloud Deployment
542         logging.info("Executing Overcloud Deployment...")
543         deploy_vars = dict()
544         deploy_vars['virtual'] = args.virtual
545         deploy_vars['debug'] = args.debug
546         deploy_vars['aarch64'] = platform.machine() == 'aarch64'
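        # Introspection is skipped for virtual and aarch64 deployments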
547         deploy_vars['introspect'] = not (args.virtual or
548                                          deploy_vars['aarch64'] or
549                                          not introspect)
550         deploy_vars['dns_server_args'] = ''
551         deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
552         deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
553         deploy_vars['stackrc'] = 'source /home/stack/stackrc'
554         deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
555         deploy_vars['undercloud_ip'] = undercloud_admin_ip
556         deploy_vars['ha_enabled'] = ha_enabled
557         deploy_vars['os_version'] = os_version
558         deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
559         deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
560         deploy_vars['vim'] = ds_opts['vim']
561         for dns_server in net_settings['dns_servers']:
562             deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
563                 dns_server)
564         try:
565             utils.run_ansible(deploy_vars, deploy_playbook, host=undercloud.ip,
566                               user='stack', tmp_dir=APEX_TEMP_DIR)
567             logging.info("Overcloud deployment complete")
568         except Exception:
569             logging.error("Deployment Failed.  Please check deploy log as "
570                           "well as mistral logs in "
571                           "{}".format(os.path.join(APEX_TEMP_DIR,
572                                                    'mistral_logs.tar.gz')))
573             raise
574         finally:
575             os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
576             for tmp_file in UC_DISK_FILES:
577                 os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
578
579         # Post install
580         logging.info("Executing post deploy configuration")
581         jumphost.configure_bridges(net_settings)
582         nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
583         deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(
584             nova_output)
585         deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
586                                      'GlobalKnownHostsFile=/dev/null -o ' \
587                                      'UserKnownHostsFile=/dev/null -o ' \
588                                      'LogLevel=error'
589         deploy_vars['external_network_cmds'] = \
590             oc_deploy.external_network_cmds(net_settings, deploy_settings)
591         # TODO(trozet): just parse all ds_opts as deploy vars one time
592         deploy_vars['gluon'] = ds_opts['gluon']
593         deploy_vars['sdn'] = ds_opts['sdn_controller']
594         for dep_option in 'yardstick', 'dovetail', 'vsperf':
595             if dep_option in ds_opts:
596                 deploy_vars[dep_option] = ds_opts[dep_option]
597             else:
598                 deploy_vars[dep_option] = False
599         deploy_vars['dataplane'] = ds_opts['dataplane']
600         overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
601         if ds_opts['congress']:
602             deploy_vars['congress_datasources'] = \
603                 oc_deploy.create_congress_cmds(overcloudrc)
604             deploy_vars['congress'] = True
605         else:
606             deploy_vars['congress'] = False
607         deploy_vars['calipso'] = ds_opts.get('calipso', False)
608         deploy_vars['calipso_ip'] = undercloud_admin_ip
609         # overcloudrc.v3 removed and set as default in queens and later
610         if os_version == 'pike':
611             deploy_vars['overcloudrc_files'] = ['overcloudrc',
612                                                 'overcloudrc.v3']
613         else:
614             deploy_vars['overcloudrc_files'] = ['overcloudrc']
615
616         post_undercloud = os.path.join(args.lib_dir,
617                                        constants.ANSIBLE_PATH,
618                                        'post_deploy_undercloud.yml')
619         logging.info("Executing post deploy configuration undercloud "
620                      "playbook")
621         try:
622             utils.run_ansible(deploy_vars, post_undercloud,
623                               host=undercloud.ip, user='stack',
624                               tmp_dir=APEX_TEMP_DIR)
625             logging.info("Post Deploy Undercloud Configuration Complete")
626         except Exception:
627             logging.error("Post Deploy Undercloud Configuration failed.  "
628                           "Please check log")
629             raise
630
631         # Deploy kubernetes if enabled
632         # (TODO)zshi move handling of kubernetes deployment
633         # to its own deployment class
634         if deploy_vars['vim'] == 'k8s':
635             # clone kubespray repo
636             git.Repo.clone_from(constants.KUBESPRAY_URL,
637                                 os.path.join(APEX_TEMP_DIR, 'kubespray'))
638             shutil.copytree(
639                 os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
640                              'sample'),
641                 os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
642                              'apex'))
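            # Controllers become kube-master and etcd members; computes become
            # kube-node members in the kubespray inventory below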
643             k8s_node_inventory = {
644                 'all': {
645                     'hosts': {},
646                     'children': {
647                         'k8s-cluster': {
648                             'children': {
649                                 'kube-master': {
650                                     'hosts': {}
651                                 },
652                                 'kube-node': {
653                                     'hosts': {}
654                                 }
655                             }
656                         },
657                         'etcd': {
658                             'hosts': {}
659                         }
660                     }
661                 }
662             }
663             for node, ip in deploy_vars['overcloud_nodes'].items():
664                 k8s_node_inventory['all']['hosts'][node] = {
665                     'ansible_become': True,
666                     'ansible_ssh_host': ip,
667                     'ansible_become_user': 'root',
668                     'ip': ip
669                 }
670                 if 'controller' in node:
671                     k8s_node_inventory['all']['children']['k8s-cluster'][
672                         'children']['kube-master']['hosts'][node] = None
673                     k8s_node_inventory['all']['children']['etcd'][
674                         'hosts'][node] = None
675                 elif 'compute' in node:
676                     k8s_node_inventory['all']['children']['k8s-cluster'][
677                         'children']['kube-node']['hosts'][node] = None
678
679             kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
680             with open(os.path.join(kubespray_dir, 'inventory', 'apex',
681                                    'apex.yaml'), 'w') as invfile:
682                 yaml.dump(k8s_node_inventory, invfile,
683                           default_flow_style=False)
684             k8s_deploy_vars = {}
685             # Add kubespray ansible control variables in k8s_deploy_vars,
686             # example: 'kube_network_plugin': 'flannel'
687             k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
688             k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
689                                                'apex', 'apex.yaml')
690
691             k8s_remove_pkgs = os.path.join(args.lib_dir,
692                                            constants.ANSIBLE_PATH,
693                                            'k8s_remove_pkgs.yml')
694             try:
695                 logging.debug("Removing any existing overcloud docker "
696                               "packages")
697                 utils.run_ansible(k8s_deploy_vars, k8s_remove_pkgs,
698                                   host=k8s_deploy_inv_file,
699                                   user='heat-admin', tmp_dir=APEX_TEMP_DIR)
700                 logging.info("k8s Deploy Remove Existing Docker Related "
701                              "Packages Complete")
702             except Exception:
703                 logging.error("k8s Deploy Remove Existing Docker Related "
704                               "Packages failed. Please check log")
705                 raise
706
707             try:
708                 utils.run_ansible(k8s_deploy_vars, k8s_deploy,
709                                   host=k8s_deploy_inv_file,
710                                   user='heat-admin', tmp_dir=APEX_TEMP_DIR)
711                 logging.info("k8s Deploy Overcloud Configuration Complete")
712             except Exception:
713                 logging.error("k8s Deploy Overcloud Configuration failed. "
714                               "Please check log")
715                 raise
716
717         # Post deploy overcloud node configuration
718         # TODO(trozet): just parse all ds_opts as deploy vars one time
719         deploy_vars['sfc'] = ds_opts['sfc']
720         deploy_vars['vpn'] = ds_opts['vpn']
721         deploy_vars['l2gw'] = ds_opts.get('l2gw')
722         deploy_vars['sriov'] = ds_opts.get('sriov')
723         deploy_vars['tacker'] = ds_opts.get('tacker')
724         # TODO(trozet): pull all logs and store in tmp dir in overcloud
725         # playbook
726         post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
727                                       'post_deploy_overcloud.yml')
728         # Run per overcloud node
729         for node, ip in deploy_vars['overcloud_nodes'].items():
730             logging.info("Executing Post deploy overcloud playbook on "
731                          "node {}".format(node))
732             try:
733                 utils.run_ansible(deploy_vars, post_overcloud, host=ip,
734                                   user='heat-admin', tmp_dir=APEX_TEMP_DIR)
735                 logging.info("Post Deploy Overcloud Configuration Complete "
736                              "for node {}".format(node))
737             except Exception:
738                 logging.error("Post Deploy Overcloud Configuration failed "
739                               "for node {}. Please check log".format(node))
740                 raise
741         logging.info("Apex deployment complete")
742         logging.info("Undercloud IP: {}, please connect by doing "
743                      "'opnfv-util undercloud'".format(undercloud.ip))
744         # TODO(trozet): add logging here showing controller VIP and horizon url
745
746
747 if __name__ == '__main__':
748     main()