Adds deployment via snapshot
[apex.git] / apex / deploy.py
1 #!/usr/bin/env python
2
3 ##############################################################################
4 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
5 #
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 ##############################################################################
11
12 import argparse
13 import git
14 import json
15 import logging
16 import os
17 import platform
18 import pprint
19 import shutil
20 import sys
21 import tempfile
22 import yaml
23
24 import apex.virtual.configure_vm as vm_lib
25 import apex.virtual.utils as virt_utils
26 import apex.builders.common_builder as c_builder
27 import apex.builders.overcloud_builder as oc_builder
28 import apex.builders.undercloud_builder as uc_builder
29 from apex import DeploySettings
30 from apex import Inventory
31 from apex import NetworkEnvironment
32 from apex import NetworkSettings
33 from apex.deployment.snapshot import SnapshotDeployment
34 from apex.common import utils
35 from apex.common import constants
36 from apex.common import parsers
37 from apex.common.exceptions import ApexDeployException
38 from apex.deployment.tripleo import ApexDeployment
39 from apex.network import jumphost
40 from apex.network import network_data
41 from apex.undercloud import undercloud as uc_lib
42 from apex.overcloud import config as oc_cfg
43 from apex.overcloud import deploy as oc_deploy
44
45 APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
46 SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
47
48
def validate_cross_settings(deploy_settings, net_settings, inventory):
    """
    Used to validate compatibility across settings files.
    :param deploy_settings: parsed settings for deployment
    :param net_settings: parsed settings for network
    :param inventory: parsed inventory file (currently unused; kept for
        interface compatibility)
    :return: None
    :raises ApexDeployException: when settings are incompatible
    """

    # Any non-OVS dataplane (fdio, DPDK variants, ...) needs a dedicated
    # NIC for the tenant network
    if deploy_settings['deploy_options']['dataplane'] != 'ovs' and 'tenant' \
            not in net_settings.enabled_network_list:
        # Fixed: adjacent string literals were missing a separating space
        # ("requiresa dedicated")
        raise ApexDeployException("Setting a DPDK based dataplane requires "
                                  "a dedicated NIC for tenant network")

    if 'odl_vpp_routing_node' in deploy_settings['deploy_options']:
        if deploy_settings['deploy_options']['dataplane'] != 'fdio':
            # Fixed: missing separating space ("setwhen")
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dataplane is set to fdio")
        if deploy_settings['deploy_options'].get('dvr') is True:
            # Fixed: missing separating space ("setwhen")
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dvr is not enabled")

    # TODO(trozet): add more checks here like RAM for ODL, etc
    # check if odl_vpp_netvirt is true and vpp is set
    # Check if fdio and nosdn:
    # tenant_nic_mapping_controller_members" ==
    # "$tenant_nic_mapping_compute_members
76
77
def build_vms(inventory, network_settings,
              template_dir='/usr/share/opnfv-apex'):
    """
    Creates the overcloud VMs and configures vbmc and the host for them.
    :param inventory: parsed inventory describing the nodes to create
    :param network_settings: parsed network settings
    :param template_dir: directory holding the libvirt VM templates
    :return: None
    """

    for index, node in enumerate(inventory['nodes']):
        vm_name = 'baremetal{}'.format(index)
        disk_path = os.path.join(constants.LIBVIRT_VOLUME_PATH,
                                 '{}.qcow2'.format(vm_name))
        # TODO(trozet): add error checking
        vm_lib.create_vm(
            vm_name, disk_path,
            baremetal_interfaces=network_settings.enabled_network_list,
            memory=node['memory'], cpus=node['cpu'],
            macs=node['mac'],
            template_dir=template_dir)
        # register the VM's virtual BMC port on the host
        virt_utils.host_setup({vm_name: node['pm_port']})
99
100
def create_deploy_parser():
    """
    Builds the argument parser used by the apex deploy entry point.
    :return: configured argparse.ArgumentParser instance
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true', default=False,
                        help="Turn on debug messages")
    parser.add_argument('-l', '--log-file', default='./apex_deploy.log',
                        dest='log_file', help="Log file to log to")
    parser.add_argument('-d', '--deploy-settings', required=True,
                        dest='deploy_settings_file',
                        help='File which contains Apex deploy settings')
    parser.add_argument('-n', '--network-settings', required=False,
                        dest='network_settings_file',
                        help='File which contains Apex network settings')
    parser.add_argument('-i', '--inventory-file', dest='inventory_file',
                        default=None,
                        help='Inventory file which contains POD definition')
    parser.add_argument('-e', '--environment-file', dest='env_file',
                        default='opnfv-environment.yaml',
                        help='Provide alternate base env file located in '
                             'deploy_dir')
    parser.add_argument('-v', '--virtual', dest='virtual',
                        action='store_true', default=False,
                        help='Enable virtual deployment')
    parser.add_argument('--interactive', action='store_true', default=False,
                        help='Enable interactive deployment mode which '
                             'requires user to confirm steps of deployment')
    parser.add_argument('--virtual-computes', dest='virt_compute_nodes',
                        type=int, default=1,
                        help='Number of Virtual Compute nodes to create and '
                             'use during deployment (defaults to 1 for noha '
                             'and 2 for ha)')
    parser.add_argument('--virtual-cpus', dest='virt_cpus',
                        type=int, default=4,
                        help='Number of CPUs to use per Overcloud VM in a '
                             'virtual deployment (defaults to 4)')
    parser.add_argument('--virtual-default-ram', dest='virt_default_ram',
                        type=int, default=8,
                        help='Amount of default RAM to use per Overcloud VM '
                             'in GB (defaults to 8).')
    parser.add_argument('--virtual-compute-ram', dest='virt_compute_ram',
                        type=int, default=None,
                        help='Amount of RAM to use per Overcloud Compute VM '
                             'in GB (defaults to 8). Overrides '
                             '--virtual-default-ram arg for computes')
    parser.add_argument('--deploy-dir', default='/usr/share/opnfv-apex',
                        help='Directory to deploy from which contains base '
                             'config files for deployment')
    parser.add_argument('--image-dir', default='/var/opt/opnfv/images',
                        help='Directory which contains base disk images for '
                             'deployment')
    parser.add_argument('--lib-dir', default='/usr/share/opnfv-apex',
                        help='Directory path for apex ansible and third '
                             'party libs')
    parser.add_argument('-s', '--snapshot', action='store_true',
                        default=False,
                        help='Use snapshots for deployment')
    parser.add_argument('--snap-cache', dest='snap_cache',
                        default=os.path.expanduser('~') + '/snap_cache',
                        help='Local directory to cache snapshot artifacts. '
                             'Defaults to $HOME/snap_cache')
    # NOTE: deprecated flag retained for backwards compatibility; always True
    parser.add_argument('--upstream', action='store_true', default=True,
                        help='Force deployment to use upstream artifacts. '
                             'This option is now deprecated and only '
                             'upstream deployments are supported.')
    parser.add_argument('--no-fetch', action='store_true', default=False,
                        help='Ignore fetching latest upstream and use what '
                             'is in cache')
    parser.add_argument('-p', '--patches', dest='patches_file',
                        default='/etc/opnfv-apex/common-patches.yaml',
                        help='File to include for common patches which '
                             'apply to all deployment scenarios')
    return parser
199
200
def validate_deploy_args(args):
    """
    Validates arguments for deploy
    :param args: parsed argparse Namespace of deploy arguments
    :return: None
    :raises ApexDeployException: when arguments are invalid or required
        files do not exist
    """

    logging.debug('Validating arguments for deployment')
    if args.snapshot:
        # Fixed: adjacent string literals were missing a separating space
        # ("applicableto")
        logging.debug('Skipping inventory validation as it is not applicable '
                      'to snapshot deployments')
    elif args.virtual and args.inventory_file is not None:
        logging.error("Virtual enabled but inventory file also given")
        raise ApexDeployException('You should not specify an inventory file '
                                  'with virtual deployments')
    elif args.virtual:
        # virtual deployments generate their own inventory later
        args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                           'inventory-virt.yaml')
    elif args.inventory_file is None or \
            not os.path.isfile(args.inventory_file):
        # guard against None (-i omitted) which previously raised a
        # TypeError from os.path.isfile instead of a clean deploy error
        logging.error("Specified inventory file does not exist: {}".format(
            args.inventory_file))
        raise ApexDeployException('Specified inventory file does not exist')

    for settings_file in (args.deploy_settings_file,
                          args.network_settings_file):
        # network settings are not used for snapshot deployments
        if settings_file == args.network_settings_file and args.snapshot:
            continue
        # guard against None (-n omitted) which previously raised a
        # TypeError from os.path.isfile instead of a clean deploy error
        if settings_file is None or not os.path.isfile(settings_file):
            logging.error("Specified settings file does not "
                          "exist: {}".format(settings_file))
            raise ApexDeployException('Specified settings file does not '
                                      'exist: {}'.format(settings_file))
233
234
235 def main():
236     parser = create_deploy_parser()
237     args = parser.parse_args(sys.argv[1:])
238     # FIXME (trozet): this is only needed as a workaround for CI.  Remove
239     # when CI is changed
240     if os.getenv('IMAGES', False):
241         args.image_dir = os.getenv('IMAGES')
242     if args.debug:
243         log_level = logging.DEBUG
244     else:
245         log_level = logging.INFO
246     os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
247     formatter = '%(asctime)s %(levelname)s: %(message)s'
248     logging.basicConfig(filename=args.log_file,
249                         format=formatter,
250                         datefmt='%m/%d/%Y %I:%M:%S %p',
251                         level=log_level)
252     console = logging.StreamHandler()
253     console.setLevel(log_level)
254     console.setFormatter(logging.Formatter(formatter))
255     logging.getLogger('').addHandler(console)
256     utils.install_ansible()
257     validate_deploy_args(args)
258     # Parse all settings
259     deploy_settings = DeploySettings(args.deploy_settings_file)
260     logging.info("Deploy settings are:\n {}".format(pprint.pformat(
261         deploy_settings)))
262
263     if not args.snapshot:
264         net_settings = NetworkSettings(args.network_settings_file)
265         logging.info("Network settings are:\n {}".format(pprint.pformat(
266             net_settings)))
267         os_version = deploy_settings['deploy_options']['os_version']
268         net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
269         net_env = NetworkEnvironment(net_settings, net_env_file,
270                                      os_version=os_version)
271         net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
272         utils.dump_yaml(dict(net_env), net_env_target)
273
274         # get global deploy params
275         ha_enabled = deploy_settings['global_params']['ha_enabled']
276         introspect = deploy_settings['global_params'].get('introspect', True)
277         net_list = net_settings.enabled_network_list
278         if args.virtual:
279             if args.virt_compute_ram is None:
280                 compute_ram = args.virt_default_ram
281             else:
282                 compute_ram = args.virt_compute_ram
283             if (deploy_settings['deploy_options']['sdn_controller'] ==
284                     'opendaylight' and args.virt_default_ram < 12):
285                 control_ram = 12
286                 logging.warning('RAM per controller is too low.  OpenDaylight '
287                                 'requires at least 12GB per controller.')
288                 logging.info('Increasing RAM per controller to 12GB')
289             elif args.virt_default_ram < 10:
290                 control_ram = 10
291                 logging.warning('RAM per controller is too low.  nosdn '
292                                 'requires at least 10GB per controller.')
293                 logging.info('Increasing RAM per controller to 10GB')
294             else:
295                 control_ram = args.virt_default_ram
296             if ha_enabled and args.virt_compute_nodes < 2:
297                 logging.debug(
298                     'HA enabled, bumping number of compute nodes to 2')
299                 args.virt_compute_nodes = 2
300             virt_utils.generate_inventory(args.inventory_file, ha_enabled,
301                                           num_computes=args.virt_compute_nodes,
302                                           controller_ram=control_ram * 1024,
303                                           compute_ram=compute_ram * 1024,
304                                           vcpus=args.virt_cpus
305                                           )
306         inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
307         logging.info("Inventory is:\n {}".format(pprint.pformat(
308             inventory)))
309
310         validate_cross_settings(deploy_settings, net_settings, inventory)
311     else:
312         # only one network with snapshots
313         net_list = [constants.ADMIN_NETWORK]
314
315     ds_opts = deploy_settings['deploy_options']
316     ansible_args = {
317         'virsh_enabled_networks': net_list,
318         'snapshot': args.snapshot
319     }
320     utils.run_ansible(ansible_args,
321                       os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
322                                    'deploy_dependencies.yml'))
323     if args.snapshot:
324         # Start snapshot Deployment
325         logging.info('Executing Snapshot Deployment...')
326         SnapshotDeployment(deploy_settings=deploy_settings,
327                            snap_cache_dir=args.snap_cache,
328                            fetch=not args.no_fetch,
329                            all_in_one=not bool(args.virt_compute_nodes))
330     else:
331         # Start Standard TripleO Deployment
332         deployment = ApexDeployment(deploy_settings, args.patches_file,
333                                     args.deploy_settings_file)
334         # TODO (trozet): add logic back from:
335         # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
336         uc_external = False
337         if 'external' in net_settings.enabled_network_list:
338             uc_external = True
339         if args.virtual:
340             # create all overcloud VMs
341             build_vms(inventory, net_settings, args.deploy_dir)
342         else:
343             # Attach interfaces to jumphost for baremetal deployment
344             jump_networks = ['admin']
345             if uc_external:
346                 jump_networks.append('external')
347             for network in jump_networks:
348                 if network == 'external':
349                     # TODO(trozet): enable vlan secondary external networks
350                     iface = net_settings['networks'][network][0][
351                         'installer_vm']['members'][0]
352                 else:
353                     iface = net_settings['networks'][network]['installer_vm'][
354                         'members'][0]
355                 bridge = "br-{}".format(network)
356                 jumphost.attach_interface_to_ovs(bridge, iface, network)
357         instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
358         with open(instackenv_json, 'w') as fh:
359             json.dump(inventory, fh)
360
361         # Create and configure undercloud
362         if args.debug:
363             root_pw = constants.DEBUG_OVERCLOUD_PW
364         else:
365             root_pw = None
366
367         if not args.upstream:
368             logging.warning("Using upstream is now required for Apex. "
369                             "Forcing upstream to true")
370         if os_version == 'master':
371             branch = 'master'
372         else:
373             branch = "stable/{}".format(os_version)
374
375         logging.info("Deploying with upstream artifacts for OpenStack "
376                      "{}".format(os_version))
377         args.image_dir = os.path.join(args.image_dir, os_version)
378         upstream_url = constants.UPSTREAM_RDO.replace(
379             constants.DEFAULT_OS_VERSION, os_version)
380         upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
381         utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
382                                         upstream_targets,
383                                         fetch=not args.no_fetch)
384         sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
385         # copy undercloud so we don't taint upstream fetch
386         uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
387         uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
388         shutil.copyfile(uc_fetch_img, uc_image)
389         # prep undercloud with required packages
390         uc_builder.add_upstream_packages(uc_image)
391         # add patches from upstream to undercloud and overcloud
392         logging.info('Adding patches to undercloud')
393         patches = deployment.determine_patches()
394         c_builder.add_upstream_patches(patches['undercloud'], uc_image,
395                                        APEX_TEMP_DIR, branch)
396
397         # Create/Start Undercloud VM
398         undercloud = uc_lib.Undercloud(args.image_dir,
399                                        args.deploy_dir,
400                                        root_pw=root_pw,
401                                        external_network=uc_external,
402                                        image_name=os.path.basename(uc_image),
403                                        os_version=os_version)
404         undercloud.start()
405         undercloud_admin_ip = net_settings['networks'][
406             constants.ADMIN_NETWORK]['installer_vm']['ip']
407
408         if ds_opts['containers']:
409             tag = constants.DOCKER_TAG
410         else:
411             tag = None
412
413         # Generate nic templates
414         for role in 'compute', 'controller':
415             oc_cfg.create_nic_template(net_settings, deploy_settings, role,
416                                        args.deploy_dir, APEX_TEMP_DIR)
417         # Install Undercloud
418         undercloud.configure(net_settings, deploy_settings,
419                              os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
420                                           'configure_undercloud.yml'),
421                              APEX_TEMP_DIR, virtual_oc=args.virtual)
422
423         # Prepare overcloud-full.qcow2
424         logging.info("Preparing Overcloud for deployment...")
425         if os_version != 'ocata':
426             net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
427             net_data = network_data.create_network_data(net_settings,
428                                                         net_data_file)
429         else:
430             net_data = False
431
432         shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
433                         os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))
434
435         # TODO(trozet): Either fix opnfv env or default to use upstream env
436         if args.env_file == 'opnfv-environment.yaml':
437             # Override the env_file if it is defaulted to opnfv
438             # opnfv env file will not work with upstream
439             args.env_file = 'upstream-environment.yaml'
440         opnfv_env = os.path.join(args.deploy_dir, args.env_file)
441         oc_deploy.prep_env(deploy_settings, net_settings, inventory,
442                            opnfv_env, net_env_target, APEX_TEMP_DIR)
443         if not args.virtual:
444             oc_deploy.LOOP_DEVICE_SIZE = "50G"
445         patched_containers = oc_deploy.prep_image(
446             deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
447             root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
448
449         oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
450                                     APEX_TEMP_DIR, args.virtual,
451                                     os.path.basename(opnfv_env),
452                                     net_data=net_data)
453         # Prepare undercloud with containers
454         docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
455                                        'prepare_overcloud_containers.yml')
456         if ds_opts['containers']:
457             logging.info("Preparing Undercloud with Docker containers")
458             sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
459             sdn_env_files = str()
460             for sdn_file in sdn_env:
461                 sdn_env_files += " -e {}".format(sdn_file)
462             if patched_containers:
463                 oc_builder.archive_docker_patches(APEX_TEMP_DIR)
464             container_vars = dict()
465             container_vars['apex_temp_dir'] = APEX_TEMP_DIR
466             container_vars['patched_docker_services'] = list(
467                 patched_containers)
468             container_vars['container_tag'] = constants.DOCKER_TAG
469             container_vars['stackrc'] = 'source /home/stack/stackrc'
470             container_vars['sdn'] = ds_opts['sdn_controller']
471             container_vars['undercloud_ip'] = undercloud_admin_ip
472             container_vars['os_version'] = os_version
473             container_vars['aarch64'] = platform.machine() == 'aarch64'
474             container_vars['sdn_env_file'] = sdn_env_files
475             try:
476                 utils.run_ansible(container_vars, docker_playbook,
477                                   host=undercloud.ip, user='stack',
478                                   tmp_dir=APEX_TEMP_DIR)
479                 logging.info("Container preparation complete")
480             except Exception:
481                 logging.error("Unable to complete container prep on "
482                               "Undercloud")
483                 os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
484                 raise
485
486         deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
487                                        'deploy_overcloud.yml')
488         virt_env = 'virtual-environment.yaml'
489         bm_env = 'baremetal-environment.yaml'
490         k8s_env = 'kubernetes-environment.yaml'
491         for p_env in virt_env, bm_env, k8s_env:
492             shutil.copyfile(os.path.join(args.deploy_dir, p_env),
493                             os.path.join(APEX_TEMP_DIR, p_env))
494
495         # Start Overcloud Deployment
496         logging.info("Executing Overcloud Deployment...")
497         deploy_vars = dict()
498         deploy_vars['virtual'] = args.virtual
499         deploy_vars['debug'] = args.debug
500         deploy_vars['aarch64'] = platform.machine() == 'aarch64'
501         deploy_vars['introspect'] = not (args.virtual or
502                                          deploy_vars['aarch64'] or
503                                          not introspect)
504         deploy_vars['dns_server_args'] = ''
505         deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
506         deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
507         deploy_vars['stackrc'] = 'source /home/stack/stackrc'
508         deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
509         deploy_vars['undercloud_ip'] = undercloud_admin_ip
510         deploy_vars['ha_enabled'] = ha_enabled
511         deploy_vars['os_version'] = os_version
512         deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
513         deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
514         deploy_vars['vim'] = ds_opts['vim']
515         for dns_server in net_settings['dns_servers']:
516             deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
517                 dns_server)
518         try:
519             utils.run_ansible(deploy_vars, deploy_playbook, host=undercloud.ip,
520                               user='stack', tmp_dir=APEX_TEMP_DIR)
521             logging.info("Overcloud deployment complete")
522         except Exception:
523             logging.error("Deployment Failed.  Please check deploy log as "
524                           "well as mistral logs in "
525                           "{}".format(os.path.join(APEX_TEMP_DIR,
526                                                    'mistral_logs.tar.gz')))
527             raise
528         finally:
529             os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
530
531         # Post install
532         logging.info("Executing post deploy configuration")
533         jumphost.configure_bridges(net_settings)
534         nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
535         deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(
536             nova_output)
537         deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
538                                      'GlobalKnownHostsFile=/dev/null -o ' \
539                                      'UserKnownHostsFile=/dev/null -o ' \
540                                      'LogLevel=error'
541         deploy_vars['external_network_cmds'] = \
542             oc_deploy.external_network_cmds(net_settings, deploy_settings)
543         # TODO(trozet): just parse all ds_opts as deploy vars one time
544         deploy_vars['gluon'] = ds_opts['gluon']
545         deploy_vars['sdn'] = ds_opts['sdn_controller']
546         for dep_option in 'yardstick', 'dovetail', 'vsperf':
547             if dep_option in ds_opts:
548                 deploy_vars[dep_option] = ds_opts[dep_option]
549             else:
550                 deploy_vars[dep_option] = False
551         deploy_vars['dataplane'] = ds_opts['dataplane']
552         overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
553         if ds_opts['congress']:
554             deploy_vars['congress_datasources'] = \
555                 oc_deploy.create_congress_cmds(overcloudrc)
556             deploy_vars['congress'] = True
557         else:
558             deploy_vars['congress'] = False
559         deploy_vars['calipso'] = ds_opts.get('calipso', False)
560         deploy_vars['calipso_ip'] = undercloud_admin_ip
561         # overcloudrc.v3 removed and set as default in queens and later
562         if os_version == 'pike':
563             deploy_vars['overcloudrc_files'] = ['overcloudrc',
564                                                 'overcloudrc.v3']
565         else:
566             deploy_vars['overcloudrc_files'] = ['overcloudrc']
567
568         post_undercloud = os.path.join(args.lib_dir,
569                                        constants.ANSIBLE_PATH,
570                                        'post_deploy_undercloud.yml')
        # Run the post-deploy undercloud playbook on the undercloud host as
        # the 'stack' user; any ansible failure is logged and re-raised so
        # the deployment aborts.
        logging.info("Executing post deploy configuration undercloud "
                     "playbook")
        try:
            utils.run_ansible(deploy_vars, post_undercloud,
                              host=undercloud.ip, user='stack',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Post Deploy Undercloud Configuration Complete")
        except Exception:
            logging.error("Post Deploy Undercloud Configuration failed.  "
                          "Please check log")
            raise

        # Deploy kubernetes if enabled
        # (TODO)zshi move handling of kubernetes deployment
        # to its own deployment class
        if deploy_vars['vim'] == 'k8s':
            # clone kubespray repo into the apex temp dir
            git.Repo.clone_from(constants.KUBESPRAY_URL,
                                os.path.join(APEX_TEMP_DIR, 'kubespray'))
            # Start from kubespray's sample inventory layout and customize
            # it under an 'apex' inventory directory.
            shutil.copytree(
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'sample'),
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'apex'))
            # Skeleton ansible inventory: hosts are filled in below; group
            # membership entries use None values, so (presumably, per
            # kubespray convention) only the presence of the host key in a
            # group matters.
            k8s_node_inventory = {
                'all':
                    {'hosts': {},
                     'children': {
                         'k8s-cluster': {
                             'children': {
                                 'kube-master': {
                                     'hosts': {}
                                 },
                                 'kube-node': {
                                     'hosts': {}
                                 }
                             }
                         },
                         'etcd': {
                             'hosts': {}
                         }
                    }
                    }
            }
            # Map each overcloud node into the inventory: controllers become
            # kube-master + etcd members, computes become kube-node members.
            # Node role is derived from the node name substring.
            for node, ip in deploy_vars['overcloud_nodes'].items():
                k8s_node_inventory['all']['hosts'][node] = {
                    'ansible_become': True,
                    'ansible_ssh_host': ip,
                    'ansible_become_user': 'root',
                    'ip': ip
                }
                if 'controller' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-master']['hosts'][node] = None
                    k8s_node_inventory['all']['children']['etcd'][
                        'hosts'][node] = None
                elif 'compute' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-node']['hosts'][node] = None

            kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
            # Write the generated inventory in block (non-flow) YAML style.
            with open(os.path.join(kubespray_dir, 'inventory', 'apex',
                                   'apex.yaml'), 'w') as invfile:
                yaml.dump(k8s_node_inventory, invfile,
                          default_flow_style=False)
            k8s_deploy_vars = {}
            # Add kubespray ansible control variables in k8s_deploy_vars,
            # example: 'kube_network_plugin': 'flannel'
            k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
            k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
                                               'apex', 'apex.yaml')

            # Playbook shipped with apex that strips pre-existing docker
            # packages from the overcloud nodes before kubespray runs.
            k8s_remove_pkgs = os.path.join(args.lib_dir,
                                           constants.ANSIBLE_PATH,
                                           'k8s_remove_pkgs.yml')
            try:
                logging.debug("Removing any existing overcloud docker "
                              "packages")
                utils.run_ansible(k8s_deploy_vars, k8s_remove_pkgs,
                                  host=k8s_deploy_inv_file,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Remove Existing Docker Related "
                             "Packages Complete")
            except Exception:
                logging.error("k8s Deploy Remove Existing Docker Related "
                              "Packages failed. Please check log")
                raise

            # Run kubespray's cluster.yml against the generated inventory
            # to stand up the kubernetes cluster on the overcloud nodes.
            try:
                utils.run_ansible(k8s_deploy_vars, k8s_deploy,
                                  host=k8s_deploy_inv_file,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Overcloud Configuration Complete")
            except Exception:
                logging.error("k8s Deploy Overcloud Configuration failed."
                              "Please check log")
                raise

        # Post deploy overcloud node configuration
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        # Expose selected deploy-settings options to the post-deploy
        # overcloud playbook. 'sfc' and 'vpn' are required keys; the rest
        # default to None when absent.
        deploy_vars['sfc'] = ds_opts['sfc']
        deploy_vars['vpn'] = ds_opts['vpn']
        deploy_vars['l2gw'] = ds_opts.get('l2gw')
        deploy_vars['sriov'] = ds_opts.get('sriov')
        deploy_vars['tacker'] = ds_opts.get('tacker')
        # TODO(trozet): pull all logs and store in tmp dir in overcloud
        # playbook
        post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                      'post_deploy_overcloud.yml')
        # Run per overcloud node; the first node failure aborts the deploy.
        for node, ip in deploy_vars['overcloud_nodes'].items():
            logging.info("Executing Post deploy overcloud playbook on "
                         "node {}".format(node))
            try:
                utils.run_ansible(deploy_vars, post_overcloud, host=ip,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("Post Deploy Overcloud Configuration Complete "
                             "for node {}".format(node))
            except Exception:
                logging.error("Post Deploy Overcloud Configuration failed "
                              "for node {}. Please check log".format(node))
                raise
        logging.info("Apex deployment complete")
        logging.info("Undercloud IP: {}, please connect by doing "
                     "'opnfv-util undercloud'".format(undercloud.ip))
        # TODO(trozet): add logging here showing controller VIP and horizon url
697
698
# Script entry point: run the full Apex deployment when this module is
# executed directly rather than imported.
if __name__ == '__main__':
    main()