Remove downloading undercloud.qcow2
[apex.git] / apex / deploy.py
1 #!/usr/bin/env python
2
3 ##############################################################################
4 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
5 #
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 ##############################################################################
11
12 import argparse
13 import git
14 import json
15 import logging
16 import os
17 import platform
18 import pprint
19 import shutil
20 import sys
21 import tempfile
22 import yaml
23
24 import apex.virtual.configure_vm as vm_lib
25 import apex.virtual.utils as virt_utils
26 import apex.builders.common_builder as c_builder
27 import apex.builders.overcloud_builder as oc_builder
28 import apex.builders.undercloud_builder as uc_builder
29 from apex import DeploySettings
30 from apex import Inventory
31 from apex import NetworkEnvironment
32 from apex import NetworkSettings
33 from apex.deployment.snapshot import SnapshotDeployment
34 from apex.common import utils
35 from apex.common import constants
36 from apex.common import parsers
37 from apex.common.exceptions import ApexDeployException
38 from apex.deployment.tripleo import ApexDeployment
39 from apex.network import jumphost
40 from apex.network import network_data
41 from apex.undercloud import undercloud as uc_lib
42 from apex.overcloud import config as oc_cfg
43 from apex.overcloud import deploy as oc_deploy
44
# Per-run scratch directory; created once at import time and used for
# generated artifacts (instackenv.json, env files, images copied for ansible)
APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
# Overcloud disk image name — presumably the OpenDaylight scenario image;
# not referenced in this module's visible code (TODO confirm external users)
SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
# Kernel/ramdisk artifacts staged into APEX_TEMP_DIR for the undercloud
# and removed again on deploy failure/completion
UC_DISK_FILES = [
    'overcloud-full.vmlinuz',
    'overcloud-full.initrd',
    'ironic-python-agent.initramfs',
    'ironic-python-agent.kernel'
]
53
54
def validate_cross_settings(deploy_settings, net_settings, inventory):
    """
    Used to validate compatibility across settings files.
    :param deploy_settings: parsed settings for deployment
    :param net_settings: parsed settings for network
    :param inventory: parsed inventory file
    :return: None
    :raises ApexDeployException: when the settings are incompatible
    """

    # Any non-OVS dataplane (DPDK/fdio) needs a dedicated NIC for tenant
    # traffic, so the tenant network must be enabled
    if deploy_settings['deploy_options']['dataplane'] != 'ovs' and 'tenant' \
            not in net_settings.enabled_network_list:
        # bugfix: adjacent literals were previously joined without a space
        # ("requiresa dedicated NIC")
        raise ApexDeployException("Setting a DPDK based dataplane requires "
                                  "a dedicated NIC for tenant network")

    if 'odl_vpp_routing_node' in deploy_settings['deploy_options']:
        if deploy_settings['deploy_options']['dataplane'] != 'fdio':
            # bugfix: message previously read "only be setwhen"
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dataplane is set to fdio")
        if deploy_settings['deploy_options'].get('dvr') is True:
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dvr is not enabled")

    # TODO(trozet): add more checks here like RAM for ODL, etc
    # check if odl_vpp_netvirt is true and vpp is set
    # Check if fdio and nosdn:
    # tenant_nic_mapping_controller_members" ==
    # "$tenant_nic_mapping_compute_members
82
83
def build_vms(inventory, network_settings,
              template_dir='/usr/share/opnfv-apex'):
    """
    Creates a libvirt VM for every node in the inventory and registers
    each VM's power management port with the host (virtual BMC).
    :param inventory: parsed inventory describing the nodes to create
    :param network_settings: parsed network settings; enabled networks
        become the VM's baremetal interfaces
    :param template_dir: directory containing the libvirt domain templates
    :return: None
    """

    enabled_networks = network_settings.enabled_network_list
    for index, node in enumerate(inventory['nodes']):
        vm_name = 'baremetal{}'.format(index)
        disk_path = os.path.join(constants.LIBVIRT_VOLUME_PATH,
                                 '{}.qcow2'.format(vm_name))
        # TODO(trozet): add error checking
        vm_lib.create_vm(vm_name,
                         disk_path,
                         baremetal_interfaces=enabled_networks,
                         memory=node['memory'],
                         cpus=node['cpu'],
                         macs=node['mac'],
                         template_dir=template_dir)
        virt_utils.host_setup({vm_name: node['pm_port']})
105
106
def create_deploy_parser():
    """
    Builds the command line parser for apex deploy.
    :return: configured argparse.ArgumentParser instance
    """
    # (flags, options) pairs, kept in the order they should appear in the
    # --help output; all strings and defaults match the deploy contract
    arg_specs = [
        (('--debug',),
         dict(action='store_true', default=False,
              help="Turn on debug messages")),
        (('-l', '--log-file'),
         dict(default='./apex_deploy.log', dest='log_file',
              help="Log file to log to")),
        (('-d', '--deploy-settings'),
         dict(dest='deploy_settings_file', required=True,
              help='File which contains Apex deploy settings')),
        (('-n', '--network-settings'),
         dict(dest='network_settings_file', required=False,
              help='File which contains Apex network settings')),
        (('-i', '--inventory-file'),
         dict(dest='inventory_file', default=None,
              help='Inventory file which contains POD definition')),
        (('-e', '--environment-file'),
         dict(dest='env_file', default='opnfv-environment.yaml',
              help='Provide alternate base env file located in '
                   'deploy_dir')),
        (('-v', '--virtual'),
         dict(action='store_true', default=False, dest='virtual',
              help='Enable virtual deployment')),
        (('--interactive',),
         dict(action='store_true', default=False,
              help='Enable interactive deployment mode which requires '
                   'user to confirm steps of deployment')),
        (('--virtual-computes',),
         dict(dest='virt_compute_nodes', default=1, type=int,
              help='Number of Virtual Compute nodes to create and use '
                   'during deployment (defaults to 1 for noha and 2 '
                   'for ha)')),
        (('--virtual-cpus',),
         dict(dest='virt_cpus', default=4, type=int,
              help='Number of CPUs to use per Overcloud VM in a virtual '
                   'deployment (defaults to 4)')),
        (('--virtual-default-ram',),
         dict(dest='virt_default_ram', default=8, type=int,
              help='Amount of default RAM to use per Overcloud VM in GB '
                   '(defaults to 8).')),
        (('--virtual-compute-ram',),
         dict(dest='virt_compute_ram', default=None, type=int,
              help='Amount of RAM to use per Overcloud Compute VM in GB '
                   '(defaults to 8). Overrides --virtual-default-ram arg '
                   'for computes')),
        (('--deploy-dir',),
         dict(default='/usr/share/opnfv-apex',
              help='Directory to deploy from which contains base config '
                   'files for deployment')),
        (('--image-dir',),
         dict(default='/var/opt/opnfv/images',
              help='Directory which contains base disk images for '
                   'deployment')),
        (('--lib-dir',),
         dict(default='/usr/share/opnfv-apex',
              help='Directory path for apex ansible and third party '
                   'libs')),
        (('-s', '--snapshot'),
         dict(action='store_true', default=False,
              help='Use snapshots for deployment')),
        (('--snap-cache',),
         dict(dest='snap_cache',
              default="{}/snap_cache".format(os.path.expanduser('~')),
              help='Local directory to cache snapshot artifacts. '
                   'Defaults to $HOME/snap_cache')),
        (('--upstream',),
         dict(action='store_true', default=True,
              help='Force deployment to use upstream artifacts. This '
                   'option is now deprecated and only upstream '
                   'deployments are supported.')),
        (('--no-fetch',),
         dict(action='store_true', default=False,
              help='Ignore fetching latest upstream and use what is in '
                   'cache')),
        (('-p', '--patches'),
         dict(default='/etc/opnfv-apex/common-patches.yaml',
              dest='patches_file',
              help='File to include for common patches which apply to '
                   'all deployment scenarios')),
    ]
    deploy_parser = argparse.ArgumentParser()
    for flags, options in arg_specs:
        deploy_parser.add_argument(*flags, **options)
    return deploy_parser
205
206
def validate_deploy_args(args):
    """
    Validates arguments for deploy
    :param args: parsed deploy arguments (argparse.Namespace)
    :return: None
    :raises ApexDeployException: when inventory or settings files are
        missing or invalid for the requested deployment type
    """

    logging.debug('Validating arguments for deployment')
    if args.snapshot:
        # bugfix: message literals were previously joined without a space
        # ("applicableto")
        logging.debug('Skipping inventory validation as it is not applicable '
                      'to snapshot deployments')
    elif args.virtual and args.inventory_file is not None:
        logging.error("Virtual enabled but inventory file also given")
        raise ApexDeployException('You should not specify an inventory file '
                                  'with virtual deployments')
    elif args.virtual:
        args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                           'inventory-virt.yaml')
    elif args.inventory_file is None or \
            not os.path.isfile(args.inventory_file):
        # bugfix: a missing -i on a baremetal deploy previously crashed
        # with TypeError from os.path.isfile(None)
        logging.error("Specified inventory file does not exist: {}".format(
            args.inventory_file))
        raise ApexDeployException('Specified inventory file does not exist')

    for settings_file in (args.deploy_settings_file,
                          args.network_settings_file):
        if settings_file == args.network_settings_file and args.snapshot:
            # network settings are not used for snapshot deployments
            continue
        # bugfix: network settings file defaults to None (required=False);
        # previously os.path.isfile(None) raised TypeError instead of a
        # clear deployment error
        if settings_file is None or not os.path.isfile(settings_file):
            logging.error("Specified settings file does not "
                          "exist: {}".format(settings_file))
            raise ApexDeployException('Specified settings file does not '
                                      'exist: {}'.format(settings_file))
239
240
241 def main():
242     parser = create_deploy_parser()
243     args = parser.parse_args(sys.argv[1:])
244     # FIXME (trozet): this is only needed as a workaround for CI.  Remove
245     # when CI is changed
246     if os.getenv('IMAGES', False):
247         args.image_dir = os.getenv('IMAGES')
248     if args.debug:
249         log_level = logging.DEBUG
250     else:
251         log_level = logging.INFO
252     os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
253     formatter = '%(asctime)s %(levelname)s: %(message)s'
254     logging.basicConfig(filename=args.log_file,
255                         format=formatter,
256                         datefmt='%m/%d/%Y %I:%M:%S %p',
257                         level=log_level)
258     console = logging.StreamHandler()
259     console.setLevel(log_level)
260     console.setFormatter(logging.Formatter(formatter))
261     logging.getLogger('').addHandler(console)
262     utils.install_ansible()
263     validate_deploy_args(args)
264     # Parse all settings
265     deploy_settings = DeploySettings(args.deploy_settings_file)
266     logging.info("Deploy settings are:\n {}".format(pprint.pformat(
267         deploy_settings)))
268
269     if not args.snapshot:
270         net_settings = NetworkSettings(args.network_settings_file)
271         logging.info("Network settings are:\n {}".format(pprint.pformat(
272             net_settings)))
273         os_version = deploy_settings['deploy_options']['os_version']
274         net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
275         net_env = NetworkEnvironment(net_settings, net_env_file,
276                                      os_version=os_version)
277         net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
278         utils.dump_yaml(dict(net_env), net_env_target)
279
280         # get global deploy params
281         ha_enabled = deploy_settings['global_params']['ha_enabled']
282         introspect = deploy_settings['global_params'].get('introspect', True)
283         net_list = net_settings.enabled_network_list
284         if args.virtual:
285             if args.virt_compute_ram is None:
286                 compute_ram = args.virt_default_ram
287             else:
288                 compute_ram = args.virt_compute_ram
289             if (deploy_settings['deploy_options']['sdn_controller'] ==
290                     'opendaylight' and args.virt_default_ram < 12):
291                 control_ram = 12
292                 logging.warning('RAM per controller is too low.  OpenDaylight '
293                                 'requires at least 12GB per controller.')
294                 logging.info('Increasing RAM per controller to 12GB')
295             elif args.virt_default_ram < 10:
296                 control_ram = 10
297                 logging.warning('RAM per controller is too low.  nosdn '
298                                 'requires at least 10GB per controller.')
299                 logging.info('Increasing RAM per controller to 10GB')
300             else:
301                 control_ram = args.virt_default_ram
302             if ha_enabled and args.virt_compute_nodes < 2:
303                 logging.debug(
304                     'HA enabled, bumping number of compute nodes to 2')
305                 args.virt_compute_nodes = 2
306             virt_utils.generate_inventory(args.inventory_file, ha_enabled,
307                                           num_computes=args.virt_compute_nodes,
308                                           controller_ram=control_ram * 1024,
309                                           compute_ram=compute_ram * 1024,
310                                           vcpus=args.virt_cpus
311                                           )
312         inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
313         logging.info("Inventory is:\n {}".format(pprint.pformat(
314             inventory)))
315
316         validate_cross_settings(deploy_settings, net_settings, inventory)
317     else:
318         # only one network with snapshots
319         net_list = [constants.ADMIN_NETWORK]
320
321     ds_opts = deploy_settings['deploy_options']
322     ansible_args = {
323         'virsh_enabled_networks': net_list,
324         'snapshot': args.snapshot
325     }
326     utils.run_ansible(ansible_args,
327                       os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
328                                    'deploy_dependencies.yml'))
329     if args.snapshot:
330         # Start snapshot Deployment
331         logging.info('Executing Snapshot Deployment...')
332         SnapshotDeployment(deploy_settings=deploy_settings,
333                            snap_cache_dir=args.snap_cache,
334                            fetch=not args.no_fetch,
335                            all_in_one=not bool(args.virt_compute_nodes))
336     else:
337         # Start Standard TripleO Deployment
338         deployment = ApexDeployment(deploy_settings, args.patches_file,
339                                     args.deploy_settings_file)
340         # TODO (trozet): add logic back from:
341         # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
342         uc_external = False
343         if 'external' in net_settings.enabled_network_list:
344             uc_external = True
345         if args.virtual:
346             # create all overcloud VMs
347             build_vms(inventory, net_settings, args.deploy_dir)
348         else:
349             # Attach interfaces to jumphost for baremetal deployment
350             jump_networks = ['admin']
351             if uc_external:
352                 jump_networks.append('external')
353             for network in jump_networks:
354                 if network == 'external':
355                     # TODO(trozet): enable vlan secondary external networks
356                     iface = net_settings['networks'][network][0][
357                         'installer_vm']['members'][0]
358                 else:
359                     iface = net_settings['networks'][network]['installer_vm'][
360                         'members'][0]
361                 bridge = "br-{}".format(network)
362                 jumphost.attach_interface_to_ovs(bridge, iface, network)
363         instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
364         with open(instackenv_json, 'w') as fh:
365             json.dump(inventory, fh)
366
367         # Create and configure undercloud
368         if args.debug:
369             root_pw = constants.DEBUG_OVERCLOUD_PW
370         else:
371             root_pw = None
372
373         if not args.upstream:
374             logging.warning("Using upstream is now required for Apex. "
375                             "Forcing upstream to true")
376         if os_version == 'master':
377             branch = 'master'
378         else:
379             branch = "stable/{}".format(os_version)
380
381         logging.info("Deploying with upstream artifacts for OpenStack "
382                      "{}".format(os_version))
383         args.image_dir = os.path.join(args.image_dir, os_version)
384         upstream_url = constants.UPSTREAM_RDO.replace(
385             constants.DEFAULT_OS_VERSION, os_version)
386         upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
387         utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
388                                         upstream_targets,
389                                         fetch=not args.no_fetch)
390         # Copy ironic files and overcloud ramdisk and kernel into temp dir
391         # to be copied by ansible into undercloud /home/stack
392         # Note the overcloud disk does not need to be copied here as it will
393         # be modified and copied later
394         for tmp_file in UC_DISK_FILES:
395             shutil.copyfile(os.path.join(args.image_dir, tmp_file),
396                             os.path.join(APEX_TEMP_DIR, tmp_file))
397         sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
398         # copy undercloud so we don't taint upstream fetch
399         uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
400         uc_fetch_img = sdn_image
401         shutil.copyfile(uc_fetch_img, uc_image)
402         # prep undercloud with required packages
403         if platform.machine() != 'aarch64':
404             uc_builder.update_repos(image=uc_image,
405                                     branch=branch.replace('stable/', ''))
406         uc_builder.add_upstream_packages(uc_image)
407         uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
408         # add patches from upstream to undercloud and overcloud
409         logging.info('Adding patches to undercloud')
410         patches = deployment.determine_patches()
411         c_builder.add_upstream_patches(patches['undercloud'], uc_image,
412                                        APEX_TEMP_DIR, branch)
413
414         # Create/Start Undercloud VM
415         undercloud = uc_lib.Undercloud(args.image_dir,
416                                        args.deploy_dir,
417                                        root_pw=root_pw,
418                                        external_network=uc_external,
419                                        image_name=os.path.basename(uc_image),
420                                        os_version=os_version)
421         undercloud.start()
422         undercloud_admin_ip = net_settings['networks'][
423             constants.ADMIN_NETWORK]['installer_vm']['ip']
424
425         if ds_opts['containers']:
426             tag = constants.DOCKER_TAG
427         else:
428             tag = None
429
430         # Generate nic templates
431         for role in 'compute', 'controller':
432             oc_cfg.create_nic_template(net_settings, deploy_settings, role,
433                                        args.deploy_dir, APEX_TEMP_DIR)
434         # Prepare/Upload docker images
435         docker_env = 'containers-prepare-parameter.yaml'
436         shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
437                         os.path.join(APEX_TEMP_DIR, docker_env))
438         c_builder.prepare_container_images(
439             os.path.join(APEX_TEMP_DIR, docker_env),
440             branch=branch.replace('stable/', ''),
441             neutron_driver=c_builder.get_neutron_driver(ds_opts)
442         )
443         # Install Undercloud
444         undercloud.configure(net_settings, deploy_settings,
445                              os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
446                                           'configure_undercloud.yml'),
447                              APEX_TEMP_DIR, virtual_oc=args.virtual)
448
449         # Prepare overcloud-full.qcow2
450         logging.info("Preparing Overcloud for deployment...")
451         if os_version != 'ocata':
452             net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
453             net_data = network_data.create_network_data(net_settings,
454                                                         net_data_file)
455         else:
456             net_data = False
457
458         shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
459                         os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))
460
461         # TODO(trozet): Either fix opnfv env or default to use upstream env
462         if args.env_file == 'opnfv-environment.yaml':
463             # Override the env_file if it is defaulted to opnfv
464             # opnfv env file will not work with upstream
465             args.env_file = 'upstream-environment.yaml'
466         opnfv_env = os.path.join(args.deploy_dir, args.env_file)
467         oc_deploy.prep_env(deploy_settings, net_settings, inventory,
468                            opnfv_env, net_env_target, APEX_TEMP_DIR)
469         if not args.virtual:
470             oc_deploy.LOOP_DEVICE_SIZE = "50G"
471         patched_containers = oc_deploy.prep_image(
472             deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
473             root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
474
475         oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
476                                     APEX_TEMP_DIR, args.virtual,
477                                     os.path.basename(opnfv_env),
478                                     net_data=net_data)
479         # Prepare undercloud with containers
480         docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
481                                        'prepare_overcloud_containers.yml')
482         if ds_opts['containers']:
483             logging.info("Preparing Undercloud with Docker containers")
484             sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
485             sdn_env_files = str()
486             for sdn_file in sdn_env:
487                 sdn_env_files += " -e {}".format(sdn_file)
488             if patched_containers:
489                 oc_builder.archive_docker_patches(APEX_TEMP_DIR)
490             container_vars = dict()
491             container_vars['apex_temp_dir'] = APEX_TEMP_DIR
492             container_vars['patched_docker_services'] = list(
493                 patched_containers)
494             container_vars['container_tag'] = constants.DOCKER_TAG
495             container_vars['stackrc'] = 'source /home/stack/stackrc'
496             container_vars['sdn'] = ds_opts['sdn_controller']
497             container_vars['undercloud_ip'] = undercloud_admin_ip
498             container_vars['os_version'] = os_version
499             container_vars['aarch64'] = platform.machine() == 'aarch64'
500             container_vars['sdn_env_file'] = sdn_env_files
501             try:
502                 utils.run_ansible(container_vars, docker_playbook,
503                                   host=undercloud.ip, user='stack',
504                                   tmp_dir=APEX_TEMP_DIR)
505                 logging.info("Container preparation complete")
506             except Exception:
507                 logging.error("Unable to complete container prep on "
508                               "Undercloud")
509                 for tmp_file in UC_DISK_FILES:
510                     os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
511                 os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
512                 raise
513
514         deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
515                                        'deploy_overcloud.yml')
516         virt_env = 'virtual-environment.yaml'
517         bm_env = 'baremetal-environment.yaml'
518         k8s_env = 'kubernetes-environment.yaml'
519         for p_env in virt_env, bm_env, k8s_env:
520             shutil.copyfile(os.path.join(args.deploy_dir, p_env),
521                             os.path.join(APEX_TEMP_DIR, p_env))
522
523         # Start Overcloud Deployment
524         logging.info("Executing Overcloud Deployment...")
525         deploy_vars = dict()
526         deploy_vars['virtual'] = args.virtual
527         deploy_vars['debug'] = args.debug
528         deploy_vars['aarch64'] = platform.machine() == 'aarch64'
529         deploy_vars['introspect'] = not (args.virtual or
530                                          deploy_vars['aarch64'] or
531                                          not introspect)
532         deploy_vars['dns_server_args'] = ''
533         deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
534         deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
535         deploy_vars['stackrc'] = 'source /home/stack/stackrc'
536         deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
537         deploy_vars['undercloud_ip'] = undercloud_admin_ip
538         deploy_vars['ha_enabled'] = ha_enabled
539         deploy_vars['os_version'] = os_version
540         deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
541         deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
542         deploy_vars['vim'] = ds_opts['vim']
543         for dns_server in net_settings['dns_servers']:
544             deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
545                 dns_server)
546         try:
547             utils.run_ansible(deploy_vars, deploy_playbook, host=undercloud.ip,
548                               user='stack', tmp_dir=APEX_TEMP_DIR)
549             logging.info("Overcloud deployment complete")
550         except Exception:
551             logging.error("Deployment Failed.  Please check deploy log as "
552                           "well as mistral logs in "
553                           "{}".format(os.path.join(APEX_TEMP_DIR,
554                                                    'mistral_logs.tar.gz')))
555             raise
556         finally:
557             os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
558             for tmp_file in UC_DISK_FILES:
559                 os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
560
561         # Post install
562         logging.info("Executing post deploy configuration")
563         jumphost.configure_bridges(net_settings)
564         nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
565         deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(
566             nova_output)
567         deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
568                                      'GlobalKnownHostsFile=/dev/null -o ' \
569                                      'UserKnownHostsFile=/dev/null -o ' \
570                                      'LogLevel=error'
571         deploy_vars['external_network_cmds'] = \
            oc_deploy.external_network_cmds(net_settings, deploy_settings)
        # --- Post-deploy configuration phase: collect remaining deploy
        # settings into ansible vars, run the undercloud/overcloud
        # post-deploy playbooks, and (optionally) deploy kubernetes. ---
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['gluon'] = ds_opts['gluon']
        deploy_vars['sdn'] = ds_opts['sdn_controller']
        # Optional test-tool features default to False when absent from the
        # deploy settings.
        for dep_option in 'yardstick', 'dovetail', 'vsperf':
            if dep_option in ds_opts:
                deploy_vars[dep_option] = ds_opts[dep_option]
            else:
                deploy_vars[dep_option] = False
        deploy_vars['dataplane'] = ds_opts['dataplane']
        overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
        # Congress needs datasource commands derived from the overcloudrc
        # credentials file fetched into the temp dir.
        if ds_opts['congress']:
            deploy_vars['congress_datasources'] = \
                oc_deploy.create_congress_cmds(overcloudrc)
            deploy_vars['congress'] = True
        else:
            deploy_vars['congress'] = False
        deploy_vars['calipso'] = ds_opts.get('calipso', False)
        deploy_vars['calipso_ip'] = undercloud_admin_ip
        # overcloudrc.v3 removed and set as default in queens and later
        if os_version == 'pike':
            deploy_vars['overcloudrc_files'] = ['overcloudrc',
                                                'overcloudrc.v3']
        else:
            deploy_vars['overcloudrc_files'] = ['overcloudrc']

        # Run the undercloud post-deploy playbook as the stack user on the
        # undercloud VM; failure is fatal (re-raised after logging).
        post_undercloud = os.path.join(args.lib_dir,
                                       constants.ANSIBLE_PATH,
                                       'post_deploy_undercloud.yml')
        logging.info("Executing post deploy configuration undercloud "
                     "playbook")
        try:
            utils.run_ansible(deploy_vars, post_undercloud,
                              host=undercloud.ip, user='stack',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Post Deploy Undercloud Configuration Complete")
        except Exception:
            logging.error("Post Deploy Undercloud Configuration failed.  "
                          "Please check log")
            raise

        # Deploy kubernetes if enabled
        # (TODO)zshi move handling of kubernetes deployment
        # to its own deployment class
        if deploy_vars['vim'] == 'k8s':
            # clone kubespray repo
            git.Repo.clone_from(constants.KUBESPRAY_URL,
                                os.path.join(APEX_TEMP_DIR, 'kubespray'))
            # Seed the 'apex' inventory from kubespray's bundled sample.
            # NOTE(review): copytree requires the destination to not exist,
            # which holds here because the repo was just cloned fresh.
            shutil.copytree(
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'sample'),
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'apex'))
            # Skeleton of a kubespray/ansible YAML inventory: every node goes
            # under all.hosts; group membership is filled in below.
            k8s_node_inventory = {
                'all':
                    {'hosts': {},
                     'children': {
                         'k8s-cluster': {
                             'children': {
                                 'kube-master': {
                                     'hosts': {}
                                 },
                                 'kube-node': {
                                     'hosts': {}
                                 }
                             }
                         },
                         'etcd': {
                             'hosts': {}
                         }
                    }
                    }
            }
            # Map overcloud nodes into kubespray groups by node-name
            # substring: 'controller' nodes become kube-master + etcd,
            # 'compute' nodes become kube-node.
            for node, ip in deploy_vars['overcloud_nodes'].items():
                k8s_node_inventory['all']['hosts'][node] = {
                    'ansible_become': True,
                    'ansible_ssh_host': ip,
                    'ansible_become_user': 'root',
                    'ip': ip
                }
                if 'controller' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-master']['hosts'][node] = None
                    k8s_node_inventory['all']['children']['etcd'][
                        'hosts'][node] = None
                elif 'compute' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-node']['hosts'][node] = None

            kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
            # Write the generated inventory in block style so kubespray can
            # consume it directly.
            with open(os.path.join(kubespray_dir, 'inventory', 'apex',
                                   'apex.yaml'), 'w') as invfile:
                yaml.dump(k8s_node_inventory, invfile,
                          default_flow_style=False)
            k8s_deploy_vars = {}
            # Add kubespray ansible control variables in k8s_deploy_vars,
            # example: 'kube_network_plugin': 'flannel'
            k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
            k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
                                               'apex', 'apex.yaml')

            # First strip any docker packages TripleO left on the overcloud
            # nodes so kubespray can install its own versions.
            k8s_remove_pkgs = os.path.join(args.lib_dir,
                                           constants.ANSIBLE_PATH,
                                           'k8s_remove_pkgs.yml')
            try:
                logging.debug("Removing any existing overcloud docker "
                              "packages")
                utils.run_ansible(k8s_deploy_vars, k8s_remove_pkgs,
                                  host=k8s_deploy_inv_file,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Remove Existing Docker Related "
                             "Packages Complete")
            except Exception:
                logging.error("k8s Deploy Remove Existing Docker Related "
                              "Packages failed. Please check log")
                raise

            # Run the main kubespray cluster playbook against the generated
            # inventory file.
            try:
                utils.run_ansible(k8s_deploy_vars, k8s_deploy,
                                  host=k8s_deploy_inv_file,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Overcloud Configuration Complete")
            except Exception:
                # NOTE(review): this error message is missing a space between
                # "failed." and "Please" ("failed.Please check log").
                logging.error("k8s Deploy Overcloud Configuration failed."
                              "Please check log")
                raise

        # Post deploy overcloud node configuration
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['sfc'] = ds_opts['sfc']
        deploy_vars['vpn'] = ds_opts['vpn']
        # NOTE(review): .get() with no default yields None (not False) when
        # the key is absent — inconsistent with the yardstick/dovetail/vsperf
        # handling above; confirm the playbooks treat None as falsy.
        deploy_vars['l2gw'] = ds_opts.get('l2gw')
        deploy_vars['sriov'] = ds_opts.get('sriov')
        deploy_vars['tacker'] = ds_opts.get('tacker')
        # TODO(trozet): pull all logs and store in tmp dir in overcloud
        # playbook
        post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                      'post_deploy_overcloud.yml')
        # Run per overcloud node
        for node, ip in deploy_vars['overcloud_nodes'].items():
            logging.info("Executing Post deploy overcloud playbook on "
                         "node {}".format(node))
            try:
                utils.run_ansible(deploy_vars, post_overcloud, host=ip,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("Post Deploy Overcloud Configuration Complete "
                             "for node {}".format(node))
            except Exception:
                logging.error("Post Deploy Overcloud Configuration failed "
                              "for node {}. Please check log".format(node))
                raise
        logging.info("Apex deployment complete")
        logging.info("Undercloud IP: {}, please connect by doing "
                     "'opnfv-util undercloud'".format(undercloud.ip))
        # TODO(trozet): add logging here showing controller VIP and horizon url
727
728
# Script entry point: run the Apex deployment driver when executed directly
# (no effect when this module is imported).
if __name__ == "__main__":
    main()