#!/usr/bin/env python

##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import argparse
import git
import json
import logging
import os
import platform
import pprint
import shutil
import sys
import tempfile
import yaml

import apex.virtual.configure_vm as vm_lib
import apex.virtual.utils as virt_utils
import apex.builders.common_builder as c_builder
import apex.builders.overcloud_builder as oc_builder
import apex.builders.undercloud_builder as uc_builder
from apex import DeploySettings
from apex import Inventory
from apex import NetworkEnvironment
from apex import NetworkSettings
from apex.deployment.snapshot import SnapshotDeployment
from apex.common import utils
from apex.common import constants
from apex.common import parsers
from apex.common.exceptions import ApexDeployException
from apex.deployment.tripleo import ApexDeployment
from apex.network import jumphost
from apex.network import network_data
from apex.undercloud import undercloud as uc_lib
from apex.overcloud import config as oc_cfg
from apex.overcloud import deploy as oc_deploy

APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
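# Kernel/ramdisk artifacts unpacked from the upstream tarballs; main() stages
# copies of these in APEX_TEMP_DIR for the undercloud and removes them again
# once the overcloud deployment finishes (or fails).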
UC_DISK_FILES = [
    'overcloud-full.vmlinuz',
    'overcloud-full.initrd',
    'ironic-python-agent.initramfs',
    'ironic-python-agent.kernel'
]


def validate_cross_settings(deploy_settings, net_settings, inventory):
    """
    Used to validate compatibility across settings files.
    :param deploy_settings: parsed settings for deployment
    :param net_settings: parsed settings for network
    :param inventory: parsed inventory file
    :return: None
    """

    if deploy_settings['deploy_options']['dataplane'] != 'ovs' and 'tenant' \
            not in net_settings.enabled_network_list:
        raise ApexDeployException("Setting a DPDK based dataplane requires "
                                  "a dedicated NIC for tenant network")

    if 'odl_vpp_routing_node' in deploy_settings['deploy_options']:
        if deploy_settings['deploy_options']['dataplane'] != 'fdio':
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dataplane is set to fdio")
        if deploy_settings['deploy_options'].get('dvr') is True:
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dvr is not enabled")

    # TODO(trozet): add more checks here like RAM for ODL, etc
    # check if odl_vpp_netvirt is true and vpp is set
    # Check if fdio and nosdn:
    # tenant_nic_mapping_controller_members" ==
    # "$tenant_nic_mapping_compute_members


def build_vms(inventory, network_settings,
              template_dir='/usr/share/opnfv-apex'):
    """
    Creates VMs and configures vbmc and host
    :param inventory: parsed inventory file
    :param network_settings: parsed settings for network
    :param template_dir: directory containing the VM definition templates
    :return: None
    """

    for idx, node in enumerate(inventory['nodes']):
        name = 'baremetal{}'.format(idx)
        volume = name + ".qcow2"
        volume_path = os.path.join(constants.LIBVIRT_VOLUME_PATH, volume)
        # TODO(trozet): add error checking
        vm_lib.create_vm(
            name, volume_path,
            baremetal_interfaces=network_settings.enabled_network_list,
            memory=node['memory'], cpus=node['cpu'],
            macs=node['mac'],
            template_dir=template_dir)
        virt_utils.host_setup({name: node['pm_port']})


def create_deploy_parser():
    deploy_parser = argparse.ArgumentParser()
    deploy_parser.add_argument('--debug', action='store_true', default=False,
                               help="Turn on debug messages")
    deploy_parser.add_argument('-l', '--log-file',
                               default='./apex_deploy.log',
                               dest='log_file', help="Log file to log to")
    deploy_parser.add_argument('-d', '--deploy-settings',
                               dest='deploy_settings_file',
                               required=True,
                               help='File which contains Apex deploy settings')
    deploy_parser.add_argument('-n', '--network-settings',
                               dest='network_settings_file',
                               required=False,
                               help='File which contains Apex network '
                                    'settings')
    deploy_parser.add_argument('-i', '--inventory-file',
                               dest='inventory_file',
                               default=None,
                               help='Inventory file which contains POD '
                                    'definition')
    deploy_parser.add_argument('-e', '--environment-file',
                               dest='env_file',
                               default='opnfv-environment.yaml',
                               help='Provide alternate base env file located '
                                    'in deploy_dir')
    deploy_parser.add_argument('-v', '--virtual', action='store_true',
                               default=False,
                               dest='virtual',
                               help='Enable virtual deployment')
    deploy_parser.add_argument('--interactive', action='store_true',
                               default=False,
                               help='Enable interactive deployment mode which '
                                    'requires user to confirm steps of '
                                    'deployment')
    deploy_parser.add_argument('--virtual-computes',
                               dest='virt_compute_nodes',
                               default=1,
                               type=int,
                               help='Number of Virtual Compute nodes to create'
                                    ' and use during deployment (defaults to 1'
                                    ' for noha and 2 for ha)')
    deploy_parser.add_argument('--virtual-cpus',
                               dest='virt_cpus',
                               default=4,
                               type=int,
                               help='Number of CPUs to use per Overcloud VM in'
                                    ' a virtual deployment (defaults to 4)')
    deploy_parser.add_argument('--virtual-default-ram',
                               dest='virt_default_ram',
                               default=8,
                               type=int,
                               help='Amount of default RAM to use per '
                                    'Overcloud VM in GB (defaults to 8).')
    deploy_parser.add_argument('--virtual-compute-ram',
                               dest='virt_compute_ram',
                               default=None,
                               type=int,
                               help='Amount of RAM to use per Overcloud '
                                    'Compute VM in GB (defaults to 8). '
                                    'Overrides --virtual-default-ram arg for '
                                    'computes')
    deploy_parser.add_argument('--deploy-dir',
                               default='/usr/share/opnfv-apex',
                               help='Directory to deploy from which contains '
                                    'base config files for deployment')
    deploy_parser.add_argument('--image-dir',
                               default='/var/opt/opnfv/images',
                               help='Directory which contains '
                                    'base disk images for deployment')
    deploy_parser.add_argument('--lib-dir',
                               default='/usr/share/opnfv-apex',
                               help='Directory path for apex ansible '
                                    'and third party libs')
    deploy_parser.add_argument('-s', '--snapshot', action='store_true',
                               default=False,
                               help='Use snapshots for deployment')
    deploy_parser.add_argument('--snap-cache', dest='snap_cache',
                               default="{}/snap_cache".format(
                                   os.path.expanduser('~')),
                               help='Local directory to cache snapshot '
                                    'artifacts. Defaults to $HOME/snap_cache')
    deploy_parser.add_argument('--upstream', action='store_true',
                               default=True,
                               help='Force deployment to use upstream '
                                    'artifacts. This option is now '
                                    'deprecated and only upstream '
                                    'deployments are supported.')
    deploy_parser.add_argument('--no-fetch', action='store_true',
                               default=False,
                               help='Ignore fetching latest upstream and '
                                    'use what is in cache')
    deploy_parser.add_argument('-p', '--patches',
                               default='/etc/opnfv-apex/common-patches.yaml',
                               dest='patches_file',
                               help='File to include for common patches '
                                    'which apply to all deployment scenarios')
    return deploy_parser


def validate_deploy_args(args):
    """
    Validates arguments for deploy
    :param args: parsed deploy arguments
    :return: None
    """

    logging.debug('Validating arguments for deployment')
    if args.snapshot:
        logging.debug('Skipping inventory validation as it is not applicable '
                      'to snapshot deployments')
    elif args.virtual and args.inventory_file is not None:
        logging.error("Virtual enabled but inventory file also given")
        raise ApexDeployException('You should not specify an inventory file '
                                  'with virtual deployments')
    elif args.virtual:
        args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                           'inventory-virt.yaml')
    elif not os.path.isfile(args.inventory_file):
        logging.error("Specified inventory file does not exist: {}".format(
            args.inventory_file))
        raise ApexDeployException('Specified inventory file does not exist')

    for settings_file in (args.deploy_settings_file,
                          args.network_settings_file):
        if settings_file == args.network_settings_file and args.snapshot:
            continue
        if not os.path.isfile(settings_file):
            logging.error("Specified settings file does not "
                          "exist: {}".format(settings_file))
            raise ApexDeployException('Specified settings file does not '
                                      'exist: {}'.format(settings_file))


def main():
    """
    Parses deploy arguments and executes an Apex deployment
    :return: None
    """
    parser = create_deploy_parser()
    args = parser.parse_args(sys.argv[1:])
    # FIXME (trozet): this is only needed as a workaround for CI.  Remove
    # when CI is changed
    if os.getenv('IMAGES', False):
        args.image_dir = os.getenv('IMAGES')
    if args.debug:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
    formatter = '%(asctime)s %(levelname)s: %(message)s'
    logging.basicConfig(filename=args.log_file,
                        format=formatter,
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=log_level)
    console = logging.StreamHandler()
    console.setLevel(log_level)
    console.setFormatter(logging.Formatter(formatter))
    logging.getLogger('').addHandler(console)
    utils.install_ansible()
    validate_deploy_args(args)
    # Parse all settings
    deploy_settings = DeploySettings(args.deploy_settings_file)
    logging.info("Deploy settings are:\n {}".format(pprint.pformat(
        deploy_settings)))

    if not args.snapshot:
        net_settings = NetworkSettings(args.network_settings_file)
        logging.info("Network settings are:\n {}".format(pprint.pformat(
            net_settings)))
        os_version = deploy_settings['deploy_options']['os_version']
        net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
        net_env = NetworkEnvironment(net_settings, net_env_file,
                                     os_version=os_version)
        net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
        utils.dump_yaml(dict(net_env), net_env_target)

        # get global deploy params
        ha_enabled = deploy_settings['global_params']['ha_enabled']
        introspect = deploy_settings['global_params'].get('introspect', True)
        net_list = net_settings.enabled_network_list
        if args.virtual:
            if args.virt_compute_ram is None:
                compute_ram = args.virt_default_ram
            else:
                compute_ram = args.virt_compute_ram
            if (deploy_settings['deploy_options']['sdn_controller'] ==
                    'opendaylight' and args.virt_default_ram < 12):
                control_ram = 12
                logging.warning('RAM per controller is too low.  OpenDaylight '
                                'requires at least 12GB per controller.')
                logging.info('Increasing RAM per controller to 12GB')
            elif args.virt_default_ram < 10:
                if platform.machine() == 'aarch64':
                    control_ram = 16
                    logging.warning('RAM per controller is too low for '
                                    'aarch64')
                    logging.info('Increasing RAM per controller to 16GB')
                else:
                    control_ram = 10
                    logging.warning('RAM per controller is too low.  nosdn '
                                    'requires at least 10GB per controller.')
                    logging.info('Increasing RAM per controller to 10GB')
            else:
                control_ram = args.virt_default_ram
            if platform.machine() == 'aarch64' and args.virt_cpus < 16:
                vcpus = 16
                logging.warning('aarch64 requires at least 16 vCPUS per '
                                'target VM. Increasing to 16.')
            else:
                vcpus = args.virt_cpus
            if ha_enabled and args.virt_compute_nodes < 2:
                logging.debug(
                    'HA enabled, bumping number of compute nodes to 2')
                args.virt_compute_nodes = 2
            virt_utils.generate_inventory(args.inventory_file, ha_enabled,
                                          num_computes=args.virt_compute_nodes,
                                          controller_ram=control_ram * 1024,
                                          compute_ram=compute_ram * 1024,
                                          vcpus=vcpus
                                          )
        inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
        logging.info("Inventory is:\n {}".format(pprint.pformat(
            inventory)))

        validate_cross_settings(deploy_settings, net_settings, inventory)
    else:
        # only one network with snapshots
        net_list = [constants.ADMIN_NETWORK]

    ds_opts = deploy_settings['deploy_options']
    ansible_args = {
        'virsh_enabled_networks': net_list,
        'snapshot': args.snapshot
    }
    utils.run_ansible(ansible_args,
                      os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                   'deploy_dependencies.yml'))
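    # No compute nodes requested (--virtual-computes 0) means an all-in-one
    # deployment; this flag feeds SnapshotDeployment below and is later passed
    # to the overcloud playbooks as a deploy var.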
    all_in_one = not bool(args.virt_compute_nodes)
    if args.snapshot:
        # Start snapshot Deployment
        logging.info('Executing Snapshot Deployment...')
        SnapshotDeployment(deploy_settings=deploy_settings,
                           snap_cache_dir=args.snap_cache,
                           fetch=not args.no_fetch,
                           all_in_one=all_in_one)
    else:
        # Start Standard TripleO Deployment
        deployment = ApexDeployment(deploy_settings, args.patches_file,
                                    args.deploy_settings_file)
        # TODO (trozet): add logic back from:
        # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
        uc_external = False
        if 'external' in net_settings.enabled_network_list:
            uc_external = True
        if args.virtual:
            # create all overcloud VMs
            build_vms(inventory, net_settings, args.deploy_dir)
        else:
            # Attach interfaces to jumphost for baremetal deployment
            jump_networks = ['admin']
            if uc_external:
                jump_networks.append('external')
            for network in jump_networks:
                if network == 'external':
                    # TODO(trozet): enable vlan secondary external networks
                    iface = net_settings['networks'][network][0][
                        'installer_vm']['members'][0]
                else:
                    iface = net_settings['networks'][network]['installer_vm'][
                        'members'][0]
                bridge = "br-{}".format(network)
                jumphost.attach_interface_to_ovs(bridge, iface, network)
        instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
        with open(instackenv_json, 'w') as fh:
            json.dump(inventory, fh)

        # Create and configure undercloud
        if args.debug:
            root_pw = constants.DEBUG_OVERCLOUD_PW
        else:
            root_pw = None

        if not args.upstream:
            logging.warning("Using upstream is now required for Apex. "
                            "Forcing upstream to true")
        if os_version == 'master':
            branch = 'master'
        else:
            branch = "stable/{}".format(os_version)

        logging.info("Deploying with upstream artifacts for OpenStack "
                     "{}".format(os_version))
        args.image_dir = os.path.join(args.image_dir, os_version)
        upstream_url = constants.UPSTREAM_RDO.replace(
            constants.DEFAULT_OS_VERSION, os_version)

        upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
        if platform.machine() == 'aarch64':
            upstream_targets.append('undercloud.qcow2')
        utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
                                        upstream_targets,
                                        fetch=not args.no_fetch)
        # Copy ironic files and overcloud ramdisk and kernel into temp dir
        # to be copied by ansible into undercloud /home/stack
        # Note the overcloud disk does not need to be copied here as it will
        # be modified and copied later
        for tmp_file in UC_DISK_FILES:
            shutil.copyfile(os.path.join(args.image_dir, tmp_file),
                            os.path.join(APEX_TEMP_DIR, tmp_file))
        if platform.machine() == 'aarch64':
            sdn_image = os.path.join(args.image_dir, 'undercloud.qcow2')
        else:
            sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
        # copy undercloud so we don't taint upstream fetch
        uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
        uc_fetch_img = sdn_image
        shutil.copyfile(uc_fetch_img, uc_image)
        # prep undercloud with required packages
        if platform.machine() != 'aarch64':
            uc_builder.update_repos(image=uc_image,
                                    branch=branch.replace('stable/', ''))
        uc_builder.add_upstream_packages(uc_image)
        uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
        # add patches from upstream to undercloud and overcloud
        logging.info('Adding patches to undercloud')
        patches = deployment.determine_patches()
        c_builder.add_upstream_patches(patches['undercloud'], uc_image,
                                       APEX_TEMP_DIR, branch)

        # Create/Start Undercloud VM
        undercloud = uc_lib.Undercloud(args.image_dir,
                                       args.deploy_dir,
                                       root_pw=root_pw,
                                       external_network=uc_external,
                                       image_name=os.path.basename(uc_image),
                                       os_version=os_version)
        undercloud.start()
        undercloud_admin_ip = net_settings['networks'][
            constants.ADMIN_NETWORK]['installer_vm']['ip']

        if ds_opts['containers']:
            tag = constants.DOCKER_TAG
        else:
            tag = None

        # Generate nic templates
        for role in 'compute', 'controller':
            oc_cfg.create_nic_template(net_settings, deploy_settings, role,
                                       args.deploy_dir, APEX_TEMP_DIR)
        # Prepare/Upload docker images
        docker_env = 'containers-prepare-parameter.yaml'
        shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
                        os.path.join(APEX_TEMP_DIR, docker_env))
        # Upload extra ansible.cfg
        if platform.machine() == 'aarch64':
            ansible_env = 'ansible.cfg'
            shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
                            os.path.join(APEX_TEMP_DIR, ansible_env))

        c_builder.prepare_container_images(
            os.path.join(APEX_TEMP_DIR, docker_env),
            branch=branch.replace('stable/', ''),
            neutron_driver=c_builder.get_neutron_driver(ds_opts)
        )
        # Install Undercloud
        undercloud.configure(net_settings, deploy_settings,
                             os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                          'configure_undercloud.yml'),
                             APEX_TEMP_DIR, virtual_oc=args.virtual)

        # Prepare overcloud-full.qcow2
        logging.info("Preparing Overcloud for deployment...")
        if os_version != 'ocata':
            net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
            net_data = network_data.create_network_data(net_settings,
                                                        net_data_file)
        else:
            net_data = False

        shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
                        os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))

        # TODO(trozet): Either fix opnfv env or default to use upstream env
        if args.env_file == 'opnfv-environment.yaml':
            # Override the env_file if it is defaulted to opnfv
            # opnfv env file will not work with upstream
            args.env_file = 'upstream-environment.yaml'
        opnfv_env = os.path.join(args.deploy_dir, args.env_file)
        oc_deploy.prep_env(deploy_settings, net_settings, inventory,
                           opnfv_env, net_env_target, APEX_TEMP_DIR)
        if not args.virtual:
            oc_deploy.LOOP_DEVICE_SIZE = "50G"
        if platform.machine() == 'aarch64':
            oc_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
        else:
            oc_image = sdn_image
        patched_containers = oc_deploy.prep_image(
            deploy_settings, net_settings, oc_image, APEX_TEMP_DIR,
            root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])

        oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
                                    APEX_TEMP_DIR, args.virtual,
                                    os.path.basename(opnfv_env),
                                    net_data=net_data)
        # Prepare undercloud with containers
        docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'prepare_overcloud_containers.yml')
        if ds_opts['containers']:
            logging.info("Preparing Undercloud with Docker containers")
            sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
            sdn_env_files = str()
            for sdn_file in sdn_env:
                sdn_env_files += " -e {}".format(sdn_file)
            if patched_containers:
                oc_builder.archive_docker_patches(APEX_TEMP_DIR)
            container_vars = dict()
            container_vars['apex_temp_dir'] = APEX_TEMP_DIR
            container_vars['patched_docker_services'] = list(
                patched_containers)
            container_vars['container_tag'] = constants.DOCKER_TAG
            container_vars['stackrc'] = 'source /home/stack/stackrc'
            container_vars['sdn'] = ds_opts['sdn_controller']
            container_vars['undercloud_ip'] = undercloud_admin_ip
            container_vars['os_version'] = os_version
            container_vars['aarch64'] = platform.machine() == 'aarch64'
            container_vars['sdn_env_file'] = sdn_env_files
            try:
                utils.run_ansible(container_vars, docker_playbook,
                                  host=undercloud.ip, user='stack',
                                  tmp_dir=APEX_TEMP_DIR)
                logging.info("Container preparation complete")
            except Exception:
                logging.error("Unable to complete container prep on "
                              "Undercloud")
                for tmp_file in UC_DISK_FILES:
                    os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
                os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
                raise

        deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'deploy_overcloud.yml')
        virt_env = 'virtual-environment.yaml'
        bm_env = 'baremetal-environment.yaml'
        k8s_env = 'kubernetes-environment.yaml'
        for p_env in virt_env, bm_env, k8s_env:
            shutil.copyfile(os.path.join(args.deploy_dir, p_env),
                            os.path.join(APEX_TEMP_DIR, p_env))

        # Start Overcloud Deployment
        logging.info("Executing Overcloud Deployment...")
        deploy_vars = dict()
        deploy_vars['virtual'] = args.virtual
        deploy_vars['debug'] = args.debug
        deploy_vars['aarch64'] = platform.machine() == 'aarch64'
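        # Introspection is skipped for virtual and aarch64 deployments, and
        # can also be disabled explicitly via the 'introspect' global param.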
        deploy_vars['introspect'] = not (args.virtual or
                                         deploy_vars['aarch64'] or
                                         not introspect)
        deploy_vars['dns_server_args'] = ''
        deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
        deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
        deploy_vars['stackrc'] = 'source /home/stack/stackrc'
        deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
        deploy_vars['undercloud_ip'] = undercloud_admin_ip
        deploy_vars['ha_enabled'] = ha_enabled
        deploy_vars['os_version'] = os_version
        deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
        deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
        deploy_vars['vim'] = ds_opts['vim']
        for dns_server in net_settings['dns_servers']:
            deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                dns_server)
        try:
            utils.run_ansible(deploy_vars, deploy_playbook, host=undercloud.ip,
                              user='stack', tmp_dir=APEX_TEMP_DIR)
            logging.info("Overcloud deployment complete")
        except Exception:
            logging.error("Deployment Failed.  Please check deploy log as "
                          "well as mistral logs in "
                          "{}".format(os.path.join(APEX_TEMP_DIR,
                                                   'mistral_logs.tar.gz')))
            raise
        finally:
            os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
            for tmp_file in UC_DISK_FILES:
                os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))

        # Post install
        logging.info("Executing post deploy configuration")
        jumphost.configure_bridges(net_settings)
        nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
        deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(
            nova_output)
        deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
                                     'GlobalKnownHostsFile=/dev/null -o ' \
                                     'UserKnownHostsFile=/dev/null -o ' \
                                     'LogLevel=error'
        deploy_vars['external_network_cmds'] = \
            oc_deploy.external_network_cmds(net_settings, deploy_settings)
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['gluon'] = ds_opts['gluon']
        deploy_vars['sdn'] = ds_opts['sdn_controller']
        for dep_option in 'yardstick', 'dovetail', 'vsperf':
            if dep_option in ds_opts:
                deploy_vars[dep_option] = ds_opts[dep_option]
            else:
                deploy_vars[dep_option] = False
        deploy_vars['dataplane'] = ds_opts['dataplane']
        overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
        if ds_opts['congress']:
            deploy_vars['congress_datasources'] = \
                oc_deploy.create_congress_cmds(overcloudrc)
            deploy_vars['congress'] = True
        else:
            deploy_vars['congress'] = False
        deploy_vars['calipso'] = ds_opts.get('calipso', False)
        deploy_vars['calipso_ip'] = undercloud_admin_ip
        # overcloudrc.v3 removed and set as default in queens and later
        if os_version == 'pike':
            deploy_vars['overcloudrc_files'] = ['overcloudrc',
                                                'overcloudrc.v3']
        else:
            deploy_vars['overcloudrc_files'] = ['overcloudrc']

        post_undercloud = os.path.join(args.lib_dir,
                                       constants.ANSIBLE_PATH,
                                       'post_deploy_undercloud.yml')
        logging.info("Executing post deploy configuration undercloud "
                     "playbook")
        try:
            utils.run_ansible(deploy_vars, post_undercloud,
                              host=undercloud.ip, user='stack',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Post Deploy Undercloud Configuration Complete")
        except Exception:
            logging.error("Post Deploy Undercloud Configuration failed.  "
                          "Please check log")
            raise

        # Deploy kubernetes if enabled
        # (TODO)zshi move handling of kubernetes deployment
        # to its own deployment class
        if deploy_vars['vim'] == 'k8s':
            # clone kubespray repo
            git.Repo.clone_from(constants.KUBESPRAY_URL,
                                os.path.join(APEX_TEMP_DIR, 'kubespray'))
            shutil.copytree(
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'sample'),
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'apex'))
            k8s_node_inventory = {
                'all': {
                    'hosts': {},
                    'children': {
                        'k8s-cluster': {
                            'children': {
                                'kube-master': {
                                    'hosts': {}
                                },
                                'kube-node': {
                                    'hosts': {}
                                }
                            }
                        },
                        'etcd': {
                            'hosts': {}
                        }
                    }
                }
            }
            for node, ip in deploy_vars['overcloud_nodes'].items():
                k8s_node_inventory['all']['hosts'][node] = {
                    'ansible_become': True,
                    'ansible_ssh_host': ip,
                    'ansible_become_user': 'root',
                    'ip': ip
                }
                if 'controller' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-master']['hosts'][node] = None
                    k8s_node_inventory['all']['children']['etcd'][
                        'hosts'][node] = None
                elif 'compute' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-node']['hosts'][node] = None

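            # The resulting inventory places every overcloud node under
            # 'all.hosts', controllers under 'kube-master' and 'etcd', and
            # computes under 'kube-node'; node names/IPs come from the nova
            # output parsed earlier into deploy_vars['overcloud_nodes'].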
            kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
            with open(os.path.join(kubespray_dir, 'inventory', 'apex',
                                   'apex.yaml'), 'w') as invfile:
                yaml.dump(k8s_node_inventory, invfile,
                          default_flow_style=False)
            k8s_deploy_vars = {}
            # Add kubespray ansible control variables in k8s_deploy_vars,
            # example: 'kube_network_plugin': 'flannel'
            k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
            k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
                                               'apex', 'apex.yaml')

            k8s_remove_pkgs = os.path.join(args.lib_dir,
                                           constants.ANSIBLE_PATH,
                                           'k8s_remove_pkgs.yml')
            try:
                logging.debug("Removing any existing overcloud docker "
                              "packages")
                utils.run_ansible(k8s_deploy_vars, k8s_remove_pkgs,
                                  host=k8s_deploy_inv_file,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Remove Existing Docker Related "
                             "Packages Complete")
            except Exception:
                logging.error("k8s Deploy Remove Existing Docker Related "
                              "Packages failed. Please check log")
                raise

            try:
                utils.run_ansible(k8s_deploy_vars, k8s_deploy,
                                  host=k8s_deploy_inv_file,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Overcloud Configuration Complete")
            except Exception:
                logging.error("k8s Deploy Overcloud Configuration failed. "
                              "Please check log")
                raise

        # Post deploy overcloud node configuration
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['sfc'] = ds_opts['sfc']
        deploy_vars['vpn'] = ds_opts['vpn']
        deploy_vars['l2gw'] = ds_opts.get('l2gw')
        deploy_vars['sriov'] = ds_opts.get('sriov')
        deploy_vars['tacker'] = ds_opts.get('tacker')
        deploy_vars['all_in_one'] = all_in_one
        # TODO(trozet): pull all logs and store in tmp dir in overcloud
        # playbook
        post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                      'post_deploy_overcloud.yml')
        # Run per overcloud node
        for node, ip in deploy_vars['overcloud_nodes'].items():
            logging.info("Executing Post deploy overcloud playbook on "
                         "node {}".format(node))
            try:
                utils.run_ansible(deploy_vars, post_overcloud, host=ip,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("Post Deploy Overcloud Configuration Complete "
                             "for node {}".format(node))
            except Exception:
                logging.error("Post Deploy Overcloud Configuration failed "
                              "for node {}. Please check log".format(node))
                raise
        logging.info("Apex deployment complete")
        logging.info("Undercloud IP: {}, please connect by doing "
                     "'opnfv-util undercloud'".format(undercloud.ip))
        # TODO(trozet): add logging here showing controller VIP and horizon url


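# Illustrative invocation (the settings file paths below are examples only,
# not defaults shipped with this script):
#   python deploy.py -d /etc/opnfv-apex/deploy_settings.yaml \
#       -n /etc/opnfv-apex/network_settings.yaml --virtual --debug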
if __name__ == '__main__':
    main()