#!/usr/bin/env python

##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import argparse
import git
import json
import logging
import os
import platform
import pprint
import shutil
import sys
import tempfile
import yaml

import apex.virtual.configure_vm as vm_lib
import apex.virtual.utils as virt_utils
import apex.builders.common_builder as c_builder
import apex.builders.overcloud_builder as oc_builder
import apex.builders.undercloud_builder as uc_builder
from apex import DeploySettings
from apex import Inventory
from apex import NetworkEnvironment
from apex import NetworkSettings
from apex.deployment.snapshot import SnapshotDeployment
from apex.common import utils
from apex.common import constants
from apex.common import parsers
from apex.common.exceptions import ApexDeployException
from apex.deployment.tripleo import ApexDeployment
from apex.network import jumphost
from apex.network import network_data
from apex.undercloud import undercloud as uc_lib
from apex.overcloud import config as oc_cfg
from apex.overcloud import deploy as oc_deploy

APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
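# Undercloud disk artifacts copied into the Apex temp dir and later pushed
# by ansible to /home/stack on the undercloud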
UC_DISK_FILES = [
    'overcloud-full.vmlinuz',
    'overcloud-full.initrd',
    'ironic-python-agent.initramfs',
    'ironic-python-agent.kernel'
]


def validate_cross_settings(deploy_settings, net_settings, inventory):
    """
    Used to validate compatibility across settings files.
    :param deploy_settings: parsed settings for deployment
    :param net_settings: parsed settings for network
    :param inventory: parsed inventory file
    :return: None
    """

    if deploy_settings['deploy_options']['dataplane'] != 'ovs' and 'tenant' \
            not in net_settings.enabled_network_list:
        raise ApexDeployException("Setting a DPDK based dataplane requires "
                                  "a dedicated NIC for tenant network")

    if 'odl_vpp_routing_node' in deploy_settings['deploy_options']:
        if deploy_settings['deploy_options']['dataplane'] != 'fdio':
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dataplane is set to fdio")
        if deploy_settings['deploy_options'].get('dvr') is True:
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dvr is not enabled")

    # TODO(trozet): add more checks here like RAM for ODL, etc
    # check if odl_vpp_netvirt is true and vpp is set
    # Check if fdio and nosdn:
    # "$tenant_nic_mapping_controller_members" ==
    # "$tenant_nic_mapping_compute_members"


def build_vms(inventory, network_settings,
              template_dir='/usr/share/opnfv-apex'):
    """
    Creates VMs and configures vbmc and host
    :param inventory: parsed inventory file describing the overcloud nodes
    :param network_settings: parsed network settings
    :param template_dir: directory containing VM templates
    :return: None
    """

    for idx, node in enumerate(inventory['nodes']):
        name = 'baremetal{}'.format(idx)
        volume = name + ".qcow2"
        volume_path = os.path.join(constants.LIBVIRT_VOLUME_PATH, volume)
        # TODO(trozet): add error checking
        vm_lib.create_vm(
            name, volume_path,
            baremetal_interfaces=network_settings.enabled_network_list,
            memory=node['memory'], cpus=node['cpu'],
            macs=node['mac'],
            template_dir=template_dir)
        virt_utils.host_setup({name: node['pm_port']})


def create_deploy_parser():
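    """
    Creates the argparse parser used for apex deploy arguments
    :return: argparse parser
    """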
    deploy_parser = argparse.ArgumentParser()
    deploy_parser.add_argument('--debug', action='store_true', default=False,
                               help="Turn on debug messages")
    deploy_parser.add_argument('-l', '--log-file',
                               default='./apex_deploy.log',
                               dest='log_file', help="Log file to log to")
    deploy_parser.add_argument('-d', '--deploy-settings',
                               dest='deploy_settings_file',
                               required=True,
                               help='File which contains Apex deploy settings')
    deploy_parser.add_argument('-n', '--network-settings',
                               dest='network_settings_file',
                               required=False,
                               help='File which contains Apex network '
                                    'settings')
    deploy_parser.add_argument('-i', '--inventory-file',
                               dest='inventory_file',
                               default=None,
                               help='Inventory file which contains POD '
                                    'definition')
    deploy_parser.add_argument('-e', '--environment-file',
                               dest='env_file',
                               default='opnfv-environment.yaml',
                               help='Provide alternate base env file located '
                                    'in deploy_dir')
    deploy_parser.add_argument('-v', '--virtual', action='store_true',
                               default=False,
                               dest='virtual',
                               help='Enable virtual deployment')
    deploy_parser.add_argument('--interactive', action='store_true',
                               default=False,
                               help='Enable interactive deployment mode which '
                                    'requires user to confirm steps of '
                                    'deployment')
    deploy_parser.add_argument('--virtual-computes',
                               dest='virt_compute_nodes',
                               default=1,
                               type=int,
                               help='Number of Virtual Compute nodes to create'
                                    ' and use during deployment (defaults to 1'
                                    ' for noha and 2 for ha)')
    deploy_parser.add_argument('--virtual-cpus',
                               dest='virt_cpus',
                               default=4,
                               type=int,
                               help='Number of CPUs to use per Overcloud VM in'
                                    ' a virtual deployment (defaults to 4)')
    deploy_parser.add_argument('--virtual-default-ram',
                               dest='virt_default_ram',
                               default=8,
                               type=int,
                               help='Amount of default RAM to use per '
                                    'Overcloud VM in GB (defaults to 8).')
    deploy_parser.add_argument('--virtual-compute-ram',
                               dest='virt_compute_ram',
                               default=None,
                               type=int,
                               help='Amount of RAM to use per Overcloud '
                                    'Compute VM in GB. Overrides '
                                    '--virtual-default-ram for computes and '
                                    'defaults to that value when unset')
    deploy_parser.add_argument('--deploy-dir',
                               default='/usr/share/opnfv-apex',
                               help='Directory to deploy from which contains '
                                    'base config files for deployment')
    deploy_parser.add_argument('--image-dir',
                               default='/var/opt/opnfv/images',
                               help='Directory which contains '
                                    'base disk images for deployment')
    deploy_parser.add_argument('--lib-dir',
                               default='/usr/share/opnfv-apex',
                               help='Directory path for apex ansible '
                                    'and third party libs')
    deploy_parser.add_argument('-s', '--snapshot', action='store_true',
                               default=False,
                               help='Use snapshots for deployment')
    deploy_parser.add_argument('--snap-cache', dest='snap_cache',
                               default="{}/snap_cache".format(
                                   os.path.expanduser('~')),
                               help='Local directory to cache snapshot '
                                    'artifacts. Defaults to $HOME/snap_cache')
    deploy_parser.add_argument('--upstream', action='store_true',
                               default=True,
                               help='Force deployment to use upstream '
                                    'artifacts. This option is now '
                                    'deprecated and only upstream '
                                    'deployments are supported.')
    deploy_parser.add_argument('--no-fetch', action='store_true',
                               default=False,
                               help='Ignore fetching latest upstream and '
                                    'use what is in cache')
    deploy_parser.add_argument('-p', '--patches',
                               default='/etc/opnfv-apex/common-patches.yaml',
                               dest='patches_file',
                               help='File to include for common patches '
                                    'which apply to all deployment scenarios')
    return deploy_parser


def validate_deploy_args(args):
    """
    Validates arguments for deploy
    :param args: parsed deploy arguments
    :return: None
    """

    logging.debug('Validating arguments for deployment')
    if args.snapshot:
        logging.debug('Skipping inventory validation as it is not applicable'
                      ' to snapshot deployments')
    elif args.virtual and args.inventory_file is not None:
        logging.error("Virtual enabled but inventory file also given")
        raise ApexDeployException('You should not specify an inventory file '
                                  'with virtual deployments')
    elif args.virtual:
        args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                           'inventory-virt.yaml')
    elif not os.path.isfile(args.inventory_file):
        logging.error("Specified inventory file does not exist: {}".format(
            args.inventory_file))
        raise ApexDeployException('Specified inventory file does not exist')

    for settings_file in (args.deploy_settings_file,
                          args.network_settings_file):
        if settings_file == args.network_settings_file and args.snapshot:
            continue
        if not os.path.isfile(settings_file):
            logging.error("Specified settings file does not "
                          "exist: {}".format(settings_file))
            raise ApexDeployException('Specified settings file does not '
                                      'exist: {}'.format(settings_file))


def main():
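    """
    Entry point for an Apex deployment: parses arguments, validates
    settings, provisions the undercloud and drives the overcloud deployment
    :return: None
    """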
    parser = create_deploy_parser()
    args = parser.parse_args(sys.argv[1:])
    # FIXME (trozet): this is only needed as a workaround for CI.  Remove
    # when CI is changed
    if os.getenv('IMAGES', False):
        args.image_dir = os.getenv('IMAGES')
    if args.debug:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
    formatter = '%(asctime)s %(levelname)s: %(message)s'
    logging.basicConfig(filename=args.log_file,
                        format=formatter,
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=log_level)
    console = logging.StreamHandler()
    console.setLevel(log_level)
    console.setFormatter(logging.Formatter(formatter))
    logging.getLogger('').addHandler(console)
    utils.install_ansible()
    validate_deploy_args(args)
    # Parse all settings
    deploy_settings = DeploySettings(args.deploy_settings_file)
    logging.info("Deploy settings are:\n {}".format(pprint.pformat(
        deploy_settings)))

    if not args.snapshot:
        net_settings = NetworkSettings(args.network_settings_file)
        logging.info("Network settings are:\n {}".format(pprint.pformat(
            net_settings)))
        os_version = deploy_settings['deploy_options']['os_version']
        net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
        net_env = NetworkEnvironment(net_settings, net_env_file,
                                     os_version=os_version)
        net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
        utils.dump_yaml(dict(net_env), net_env_target)

        # get global deploy params
        ha_enabled = deploy_settings['global_params']['ha_enabled']
        introspect = deploy_settings['global_params'].get('introspect', True)
        net_list = net_settings.enabled_network_list
        if args.virtual:
            if args.virt_compute_ram is None:
                compute_ram = args.virt_default_ram
            else:
                compute_ram = args.virt_compute_ram
            if (deploy_settings['deploy_options']['sdn_controller'] ==
                    'opendaylight' and args.virt_default_ram < 12):
                control_ram = 12
                logging.warning('RAM per controller is too low.  OpenDaylight '
                                'requires at least 12GB per controller.')
                logging.info('Increasing RAM per controller to 12GB')
            elif args.virt_default_ram < 10:
                if platform.machine() == 'aarch64':
                    control_ram = 16
                    logging.warning('RAM per controller is too low for '
                                    'aarch64 ')
                    logging.info('Increasing RAM per controller to 16GB')
                else:
                    control_ram = 10
                    logging.warning('RAM per controller is too low.  nosdn '
                                    'requires at least 10GB per controller.')
                    logging.info('Increasing RAM per controller to 10GB')
            else:
                control_ram = args.virt_default_ram
            if platform.machine() == 'aarch64' and args.virt_cpus < 16:
                vcpus = 16
                logging.warning('aarch64 requires at least 16 vCPUs per '
                                'target VM. Increasing to 16.')
            else:
                vcpus = args.virt_cpus
            if ha_enabled and args.virt_compute_nodes < 2:
                logging.debug(
                    'HA enabled, bumping number of compute nodes to 2')
                args.virt_compute_nodes = 2
            virt_utils.generate_inventory(args.inventory_file, ha_enabled,
                                          num_computes=args.virt_compute_nodes,
                                          controller_ram=control_ram * 1024,
                                          compute_ram=compute_ram * 1024,
                                          vcpus=vcpus
                                          )
        inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
        logging.info("Inventory is:\n {}".format(pprint.pformat(
            inventory)))

        validate_cross_settings(deploy_settings, net_settings, inventory)
    else:
        # only one network with snapshots
        net_list = [constants.ADMIN_NETWORK]

    ds_opts = deploy_settings['deploy_options']
    ansible_args = {
        'virsh_enabled_networks': net_list,
        'snapshot': args.snapshot
    }
    utils.run_ansible(ansible_args,
                      os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                   'deploy_dependencies.yml'))
    if args.snapshot:
        # Start snapshot Deployment
        logging.info('Executing Snapshot Deployment...')
        SnapshotDeployment(deploy_settings=deploy_settings,
                           snap_cache_dir=args.snap_cache,
                           fetch=not args.no_fetch,
                           all_in_one=not bool(args.virt_compute_nodes))
    else:
        # Start Standard TripleO Deployment
        deployment = ApexDeployment(deploy_settings, args.patches_file,
                                    args.deploy_settings_file)
        # TODO (trozet): add logic back from:
        # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
        uc_external = False
        if 'external' in net_settings.enabled_network_list:
            uc_external = True
        if args.virtual:
            # create all overcloud VMs
            build_vms(inventory, net_settings, args.deploy_dir)
        else:
            # Attach interfaces to jumphost for baremetal deployment
            jump_networks = ['admin']
            if uc_external:
                jump_networks.append('external')
            for network in jump_networks:
                if network == 'external':
                    # TODO(trozet): enable vlan secondary external networks
                    iface = net_settings['networks'][network][0][
                        'installer_vm']['members'][0]
                else:
                    iface = net_settings['networks'][network]['installer_vm'][
                        'members'][0]
                bridge = "br-{}".format(network)
                jumphost.attach_interface_to_ovs(bridge, iface, network)
        instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
        with open(instackenv_json, 'w') as fh:
            json.dump(inventory, fh)

        # Create and configure undercloud
        if args.debug:
            root_pw = constants.DEBUG_OVERCLOUD_PW
        else:
            root_pw = None

        if not args.upstream:
            logging.warning("Using upstream is now required for Apex. "
                            "Forcing upstream to true")
        if os_version == 'master':
            branch = 'master'
        else:
            branch = "stable/{}".format(os_version)

        logging.info("Deploying with upstream artifacts for OpenStack "
                     "{}".format(os_version))
        args.image_dir = os.path.join(args.image_dir, os_version)
        upstream_url = constants.UPSTREAM_RDO.replace(
            constants.DEFAULT_OS_VERSION, os_version)

        upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
        if platform.machine() == 'aarch64':
            upstream_targets.append('undercloud.qcow2')
        utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
                                        upstream_targets,
                                        fetch=not args.no_fetch)
        # Copy ironic files and overcloud ramdisk and kernel into temp dir
        # to be copied by ansible into undercloud /home/stack
        # Note the overcloud disk does not need to be copied here as it will
        # be modified and copied later
        for tmp_file in UC_DISK_FILES:
            shutil.copyfile(os.path.join(args.image_dir, tmp_file),
                            os.path.join(APEX_TEMP_DIR, tmp_file))
        if platform.machine() == 'aarch64':
            sdn_image = os.path.join(args.image_dir, 'undercloud.qcow2')
        else:
            sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
        # copy undercloud so we don't taint upstream fetch
        uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
        uc_fetch_img = sdn_image
        shutil.copyfile(uc_fetch_img, uc_image)
        # prep undercloud with required packages
        if platform.machine() != 'aarch64':
            uc_builder.update_repos(image=uc_image,
                                    branch=branch.replace('stable/', ''))
        uc_builder.add_upstream_packages(uc_image)
        uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
        # add patches from upstream to undercloud and overcloud
        logging.info('Adding patches to undercloud')
        patches = deployment.determine_patches()
        c_builder.add_upstream_patches(patches['undercloud'], uc_image,
                                       APEX_TEMP_DIR, branch)

        # Create/Start Undercloud VM
        undercloud = uc_lib.Undercloud(args.image_dir,
                                       args.deploy_dir,
                                       root_pw=root_pw,
                                       external_network=uc_external,
                                       image_name=os.path.basename(uc_image),
                                       os_version=os_version)
        undercloud.start()
        undercloud_admin_ip = net_settings['networks'][
            constants.ADMIN_NETWORK]['installer_vm']['ip']

        if ds_opts['containers']:
            tag = constants.DOCKER_TAG
        else:
            tag = None

        # Generate nic templates
        for role in 'compute', 'controller':
            oc_cfg.create_nic_template(net_settings, deploy_settings, role,
                                       args.deploy_dir, APEX_TEMP_DIR)
        # Prepare/Upload docker images
        docker_env = 'containers-prepare-parameter.yaml'
        shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
                        os.path.join(APEX_TEMP_DIR, docker_env))
        # Upload extra ansible.cfg
        if platform.machine() == 'aarch64':
            ansible_env = 'ansible.cfg'
            shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
                            os.path.join(APEX_TEMP_DIR, ansible_env))

        c_builder.prepare_container_images(
            os.path.join(APEX_TEMP_DIR, docker_env),
            branch=branch.replace('stable/', ''),
            neutron_driver=c_builder.get_neutron_driver(ds_opts)
        )
        # Install Undercloud
        undercloud.configure(net_settings, deploy_settings,
                             os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                          'configure_undercloud.yml'),
                             APEX_TEMP_DIR, virtual_oc=args.virtual)

        # Prepare overcloud-full.qcow2
        logging.info("Preparing Overcloud for deployment...")
        if os_version != 'ocata':
            net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
            net_data = network_data.create_network_data(net_settings,
                                                        net_data_file)
        else:
            net_data = False

        shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
                        os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))

        # TODO(trozet): Either fix opnfv env or default to use upstream env
        if args.env_file == 'opnfv-environment.yaml':
            # Override the env_file if it is defaulted to opnfv
            # opnfv env file will not work with upstream
            args.env_file = 'upstream-environment.yaml'
        opnfv_env = os.path.join(args.deploy_dir, args.env_file)
        oc_deploy.prep_env(deploy_settings, net_settings, inventory,
                           opnfv_env, net_env_target, APEX_TEMP_DIR)
        if not args.virtual:
            oc_deploy.LOOP_DEVICE_SIZE = "50G"
        if platform.machine() == 'aarch64':
            oc_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
        else:
            oc_image = sdn_image
        patched_containers = oc_deploy.prep_image(
            deploy_settings, net_settings, oc_image, APEX_TEMP_DIR,
            root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])

        oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
                                    APEX_TEMP_DIR, args.virtual,
                                    os.path.basename(opnfv_env),
                                    net_data=net_data)
        # Prepare undercloud with containers
        docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'prepare_overcloud_containers.yml')
        if ds_opts['containers']:
            logging.info("Preparing Undercloud with Docker containers")
            sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
            sdn_env_files = str()
            for sdn_file in sdn_env:
                sdn_env_files += " -e {}".format(sdn_file)
            if patched_containers:
                oc_builder.archive_docker_patches(APEX_TEMP_DIR)
            container_vars = dict()
            container_vars['apex_temp_dir'] = APEX_TEMP_DIR
            container_vars['patched_docker_services'] = list(
                patched_containers)
            container_vars['container_tag'] = constants.DOCKER_TAG
            container_vars['stackrc'] = 'source /home/stack/stackrc'
            container_vars['sdn'] = ds_opts['sdn_controller']
            container_vars['undercloud_ip'] = undercloud_admin_ip
            container_vars['os_version'] = os_version
            container_vars['aarch64'] = platform.machine() == 'aarch64'
            container_vars['sdn_env_file'] = sdn_env_files
            try:
                utils.run_ansible(container_vars, docker_playbook,
                                  host=undercloud.ip, user='stack',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("Container preparation complete")
            except Exception:
                logging.error("Unable to complete container prep on "
                              "Undercloud")
                for tmp_file in UC_DISK_FILES:
                    os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
                os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
                raise

        deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'deploy_overcloud.yml')
        virt_env = 'virtual-environment.yaml'
        bm_env = 'baremetal-environment.yaml'
        k8s_env = 'kubernetes-environment.yaml'
        for p_env in virt_env, bm_env, k8s_env:
            shutil.copyfile(os.path.join(args.deploy_dir, p_env),
                            os.path.join(APEX_TEMP_DIR, p_env))

        # Start Overcloud Deployment
        logging.info("Executing Overcloud Deployment...")
        deploy_vars = dict()
        deploy_vars['virtual'] = args.virtual
        deploy_vars['debug'] = args.debug
        deploy_vars['aarch64'] = platform.machine() == 'aarch64'
        deploy_vars['introspect'] = not (args.virtual or
                                         deploy_vars['aarch64'] or
                                         not introspect)
        deploy_vars['dns_server_args'] = ''
        deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
        deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
        deploy_vars['stackrc'] = 'source /home/stack/stackrc'
        deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
        deploy_vars['undercloud_ip'] = undercloud_admin_ip
        deploy_vars['ha_enabled'] = ha_enabled
        deploy_vars['os_version'] = os_version
        deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
        deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
        deploy_vars['vim'] = ds_opts['vim']
        for dns_server in net_settings['dns_servers']:
            deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                dns_server)
        try:
            utils.run_ansible(deploy_vars, deploy_playbook, host=undercloud.ip,
                              user='stack', tmp_dir=APEX_TEMP_DIR)
            logging.info("Overcloud deployment complete")
        except Exception:
            logging.error("Deployment Failed.  Please check deploy log as "
                          "well as mistral logs in "
                          "{}".format(os.path.join(APEX_TEMP_DIR,
                                                   'mistral_logs.tar.gz')))
            raise
        finally:
            os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
            for tmp_file in UC_DISK_FILES:
                os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))

        # Post install
        logging.info("Executing post deploy configuration")
        jumphost.configure_bridges(net_settings)
        nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
        deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(
            nova_output)
        deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
                                     'GlobalKnownHostsFile=/dev/null -o ' \
                                     'UserKnownHostsFile=/dev/null -o ' \
                                     'LogLevel=error'
        deploy_vars['external_network_cmds'] = \
            oc_deploy.external_network_cmds(net_settings, deploy_settings)
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['gluon'] = ds_opts['gluon']
        deploy_vars['sdn'] = ds_opts['sdn_controller']
        for dep_option in 'yardstick', 'dovetail', 'vsperf':
            if dep_option in ds_opts:
                deploy_vars[dep_option] = ds_opts[dep_option]
            else:
                deploy_vars[dep_option] = False
        deploy_vars['dataplane'] = ds_opts['dataplane']
        overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
        if ds_opts['congress']:
            deploy_vars['congress_datasources'] = \
                oc_deploy.create_congress_cmds(overcloudrc)
            deploy_vars['congress'] = True
        else:
            deploy_vars['congress'] = False
        deploy_vars['calipso'] = ds_opts.get('calipso', False)
        deploy_vars['calipso_ip'] = undercloud_admin_ip
        # overcloudrc.v3 removed and set as default in queens and later
        if os_version == 'pike':
            deploy_vars['overcloudrc_files'] = ['overcloudrc',
                                                'overcloudrc.v3']
        else:
            deploy_vars['overcloudrc_files'] = ['overcloudrc']

        post_undercloud = os.path.join(args.lib_dir,
                                       constants.ANSIBLE_PATH,
                                       'post_deploy_undercloud.yml')
        logging.info("Executing post deploy configuration undercloud "
                     "playbook")
        try:
            utils.run_ansible(deploy_vars, post_undercloud,
                              host=undercloud.ip, user='stack',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Post Deploy Undercloud Configuration Complete")
        except Exception:
            logging.error("Post Deploy Undercloud Configuration failed.  "
                          "Please check log")
            raise

        # Deploy kubernetes if enabled
        # TODO(zshi): move handling of kubernetes deployment
        # to its own deployment class
        if deploy_vars['vim'] == 'k8s':
            # clone kubespray repo
            git.Repo.clone_from(constants.KUBESPRAY_URL,
                                os.path.join(APEX_TEMP_DIR, 'kubespray'))
            shutil.copytree(
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'sample'),
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'apex'))
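            # Build a kubespray inventory from the deployed overcloud nodes:
            # controllers become kube-master/etcd members, computes kube-node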
            k8s_node_inventory = {
                'all':
                    {'hosts': {},
                     'children': {
                         'k8s-cluster': {
                             'children': {
                                 'kube-master': {
                                     'hosts': {}
                                 },
                                 'kube-node': {
                                     'hosts': {}
                                 }
                             }
                         },
                         'etcd': {
                             'hosts': {}
                         }
                    }
                    }
            }
            for node, ip in deploy_vars['overcloud_nodes'].items():
                k8s_node_inventory['all']['hosts'][node] = {
                    'ansible_become': True,
                    'ansible_ssh_host': ip,
                    'ansible_become_user': 'root',
                    'ip': ip
                }
                if 'controller' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-master']['hosts'][node] = None
                    k8s_node_inventory['all']['children']['etcd'][
                        'hosts'][node] = None
                elif 'compute' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-node']['hosts'][node] = None

            kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
            with open(os.path.join(kubespray_dir, 'inventory', 'apex',
                                   'apex.yaml'), 'w') as invfile:
                yaml.dump(k8s_node_inventory, invfile,
                          default_flow_style=False)
            k8s_deploy_vars = {}
            # Add kubespray ansible control variables in k8s_deploy_vars,
            # example: 'kube_network_plugin': 'flannel'
            k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
            k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
                                               'apex', 'apex.yaml')

            k8s_remove_pkgs = os.path.join(args.lib_dir,
                                           constants.ANSIBLE_PATH,
                                           'k8s_remove_pkgs.yml')
            try:
                logging.debug("Removing any existing overcloud docker "
                              "packages")
                utils.run_ansible(k8s_deploy_vars, k8s_remove_pkgs,
                                  host=k8s_deploy_inv_file,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Remove Existing Docker Related "
                             "Packages Complete")
            except Exception:
                logging.error("k8s Deploy Remove Existing Docker Related "
                              "Packages failed. Please check log")
                raise

            try:
                utils.run_ansible(k8s_deploy_vars, k8s_deploy,
                                  host=k8s_deploy_inv_file,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Overcloud Configuration Complete")
            except Exception:
                logging.error("k8s Deploy Overcloud Configuration failed. "
                              "Please check log")
                raise

        # Post deploy overcloud node configuration
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['sfc'] = ds_opts['sfc']
        deploy_vars['vpn'] = ds_opts['vpn']
        deploy_vars['l2gw'] = ds_opts.get('l2gw')
        deploy_vars['sriov'] = ds_opts.get('sriov')
        deploy_vars['tacker'] = ds_opts.get('tacker')
        # TODO(trozet): pull all logs and store in tmp dir in overcloud
        # playbook
        post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                      'post_deploy_overcloud.yml')
        # Run per overcloud node
        for node, ip in deploy_vars['overcloud_nodes'].items():
            logging.info("Executing Post deploy overcloud playbook on "
                         "node {}".format(node))
            try:
                utils.run_ansible(deploy_vars, post_overcloud, host=ip,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("Post Deploy Overcloud Configuration Complete "
                             "for node {}".format(node))
            except Exception:
                logging.error("Post Deploy Overcloud Configuration failed "
                              "for node {}. Please check log".format(node))
                raise
        logging.info("Apex deployment complete")
        logging.info("Undercloud IP: {}, please connect by doing "
                     "'opnfv-util undercloud'".format(undercloud.ip))
        # TODO(trozet): add logging here showing controller VIP and horizon url


if __name__ == '__main__':
    main()