Fixes undercloud install failure when setting hostname
diff --git a/apex/deploy.py b/apex/deploy.py
index a056138..d0c2b20 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -10,6 +10,7 @@
 ##############################################################################
 
 import argparse
+import git
 import json
 import logging
 import os
@@ -18,30 +19,37 @@ import pprint
 import shutil
 import sys
 import tempfile
+import yaml
 
 import apex.virtual.configure_vm as vm_lib
-import apex.virtual.virtual_utils as virt_utils
+import apex.virtual.utils as virt_utils
+import apex.builders.common_builder as c_builder
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.undercloud_builder as uc_builder
 from apex import DeploySettings
 from apex import Inventory
 from apex import NetworkEnvironment
 from apex import NetworkSettings
+from apex.deployment.snapshot import SnapshotDeployment
 from apex.common import utils
 from apex.common import constants
 from apex.common import parsers
 from apex.common.exceptions import ApexDeployException
+from apex.deployment.tripleo import ApexDeployment
 from apex.network import jumphost
+from apex.network import network_data
 from apex.undercloud import undercloud as uc_lib
 from apex.overcloud import config as oc_cfg
-from apex.overcloud import overcloud_deploy
+from apex.overcloud import deploy as oc_deploy
 
 APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
-ANSIBLE_PATH = 'ansible/playbooks'
 SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
-
-
-def deploy_quickstart(args, deploy_settings_file, network_settings_file,
-                      inventory_file=None):
-    pass
+UC_DISK_FILES = [
+    'overcloud-full.vmlinuz',
+    'overcloud-full.initrd',
+    'ironic-python-agent.initramfs',
+    'ironic-python-agent.kernel'
+]
 
 
 def validate_cross_settings(deploy_settings, net_settings, inventory):
@@ -58,6 +66,14 @@ def validate_cross_settings(deploy_settings, net_settings, inventory):
         raise ApexDeployException("Setting a DPDK based dataplane requires"
                                   "a dedicated NIC for tenant network")
 
+    if 'odl_vpp_routing_node' in deploy_settings['deploy_options']:
+        if deploy_settings['deploy_options']['dataplane'] != 'fdio':
+            raise ApexDeployException("odl_vpp_routing_node should only be set"
+                                      "when dataplane is set to fdio")
+        if deploy_settings['deploy_options'].get('dvr') is True:
+            raise ApexDeployException("odl_vpp_routing_node should only be set"
+                                      "when dvr is not enabled")
+
     # TODO(trozet): add more checks here like RAM for ODL, etc
     # check if odl_vpp_netvirt is true and vpp is set
     # Check if fdio and nosdn:
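
The new check rejects odl_vpp_routing_node whenever the dataplane is not
fdio or DVR is enabled. A minimal sketch of deploy_options that passes it,
using a plain dict in place of a DeploySettings object (the node name is
hypothetical):

    deploy_options = {
        'dataplane': 'fdio',   # anything else raises ApexDeployException
        'dvr': False,          # True alongside the routing node also raises
        'odl_vpp_routing_node': 'overcloud-novacompute-0'  # hypothetical
    }
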
@@ -101,7 +117,7 @@ def create_deploy_parser():
                                help='File which contains Apex deploy settings')
     deploy_parser.add_argument('-n', '--network-settings',
                                dest='network_settings_file',
-                               required=True,
+                               required=False,
                                help='File which contains Apex network '
                                     'settings')
     deploy_parser.add_argument('-i', '--inventory-file',
@@ -112,7 +128,8 @@ def create_deploy_parser():
     deploy_parser.add_argument('-e', '--environment-file',
                                dest='env_file',
                                default='opnfv-environment.yaml',
-                               help='Provide alternate base env file')
+                               help='Provide alternate base env file located '
+                                    'in deploy_dir')
     deploy_parser.add_argument('-v', '--virtual', action='store_true',
                                default=False,
                                dest='virtual',
@@ -161,9 +178,29 @@ def create_deploy_parser():
                                default='/usr/share/opnfv-apex',
                                help='Directory path for apex ansible '
                                     'and third party libs')
-    deploy_parser.add_argument('--quickstart', action='store_true',
+    deploy_parser.add_argument('-s', '--snapshot', action='store_true',
                                default=False,
-                               help='Use tripleo-quickstart to deploy')
+                               help='Use snapshots for deployment')
+    deploy_parser.add_argument('--snap-cache', dest='snap_cache',
+                               default="{}/snap_cache".format(
+                                   os.path.expanduser('~')),
+                               help='Local directory to cache snapshot '
+                                    'artifacts. Defaults to $HOME/snap_cache')
+    deploy_parser.add_argument('--upstream', action='store_true',
+                               default=True,
+                               help='Force deployment to use upstream '
+                                    'artifacts. This option is now '
+                                    'deprecated and only upstream '
+                                    'deployments are supported.')
+    deploy_parser.add_argument('--no-fetch', action='store_true',
+                               default=False,
+                               help='Ignore fetching latest upstream and '
+                                    'use what is in cache')
+    deploy_parser.add_argument('-p', '--patches',
+                               default='/etc/opnfv-apex/common-patches.yaml',
+                               dest='patches_file',
+                               help='File to include for common patches '
+                                    'which apply to all deployment scenarios')
     return deploy_parser
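
With these changes a snapshot deployment no longer requires
-n/--network-settings. A quick illustration of the resulting parser
behavior (the -d/--deploy-settings short flag and the settings file name
are assumed for illustration):

    from apex.deploy import create_deploy_parser

    parser = create_deploy_parser()
    args = parser.parse_args(['-d', 'deploy_settings.yaml',
                              '--snapshot', '--no-fetch'])
    print(args.snap_cache)    # defaults to $HOME/snap_cache
    print(args.patches_file)  # /etc/opnfv-apex/common-patches.yaml
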
 
 
@@ -175,20 +212,25 @@ def validate_deploy_args(args):
     """
 
     logging.debug('Validating arguments for deployment')
-    if args.virtual and args.inventory_file is not None:
+    if args.snapshot:
+        logging.debug('Skipping inventory validation as it is not '
+                      'applicable to snapshot deployments')
+    elif args.virtual and args.inventory_file is not None:
         logging.error("Virtual enabled but inventory file also given")
         raise ApexDeployException('You should not specify an inventory file '
                                   'with virtual deployments')
     elif args.virtual:
         args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                            'inventory-virt.yaml')
-    elif os.path.isfile(args.inventory_file) is False:
+    elif not os.path.isfile(args.inventory_file):
         logging.error("Specified inventory file does not exist: {}".format(
             args.inventory_file))
         raise ApexDeployException('Specified inventory file does not exist')
 
     for settings_file in (args.deploy_settings_file,
                           args.network_settings_file):
+        if settings_file == args.network_settings_file and args.snapshot:
+            continue
         if os.path.isfile(settings_file) is False:
             logging.error("Specified settings file does not "
                           "exist: {}".format(settings_file))
@@ -217,72 +259,99 @@ def main():
     console.setLevel(log_level)
     console.setFormatter(logging.Formatter(formatter))
     logging.getLogger('').addHandler(console)
+    utils.install_ansible()
     validate_deploy_args(args)
     # Parse all settings
     deploy_settings = DeploySettings(args.deploy_settings_file)
     logging.info("Deploy settings are:\n {}".format(pprint.pformat(
-                 deploy_settings)))
-    net_settings = NetworkSettings(args.network_settings_file)
-    logging.info("Network settings are:\n {}".format(pprint.pformat(
-                 net_settings)))
-    net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
-    net_env = NetworkEnvironment(net_settings, net_env_file)
-    net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
-    utils.dump_yaml(dict(net_env), net_env_target)
-    ha_enabled = deploy_settings['global_params']['ha_enabled']
-    if args.virtual:
-        if args.virt_compute_ram is None:
-            compute_ram = args.virt_default_ram
-        else:
-            compute_ram = args.virt_compute_ram
-        if deploy_settings['deploy_options']['sdn_controller'] == \
-                'opendaylight' and args.virt_default_ram < 12:
-            control_ram = 12
-            logging.warning('RAM per controller is too low.  OpenDaylight '
-                            'requires at least 12GB per controller.')
-            logging.info('Increasing RAM per controller to 12GB')
-        elif args.virt_default_ram < 10:
-            control_ram = 10
-            logging.warning('RAM per controller is too low.  nosdn '
-                            'requires at least 10GB per controller.')
-            logging.info('Increasing RAM per controller to 10GB')
-        else:
-            control_ram = args.virt_default_ram
-        if ha_enabled and args.virt_compute_nodes < 2:
-            logging.debug('HA enabled, bumping number of compute nodes to 2')
-            args.virt_compute_nodes = 2
-        virt_utils.generate_inventory(args.inventory_file, ha_enabled,
-                                      num_computes=args.virt_compute_nodes,
-                                      controller_ram=control_ram * 1024,
-                                      compute_ram=compute_ram * 1024,
-                                      vcpus=args.virt_cpus
-                                      )
-    inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
-
-    validate_cross_settings(deploy_settings, net_settings, inventory)
-
-    if args.quickstart:
-        deploy_settings_file = os.path.join(APEX_TEMP_DIR,
-                                            'apex_deploy_settings.yaml')
-        utils.dump_yaml(utils.dict_objects_to_str(deploy_settings),
-                        deploy_settings_file)
-        logging.info("File created: {}".format(deploy_settings_file))
-        network_settings_file = os.path.join(APEX_TEMP_DIR,
-                                             'apex_network_settings.yaml')
-        utils.dump_yaml(utils.dict_objects_to_str(net_settings),
-                        network_settings_file)
-        logging.info("File created: {}".format(network_settings_file))
-        deploy_quickstart(args, deploy_settings_file, network_settings_file,
-                          args.inventory_file)
+        deploy_settings)))
+
+    if not args.snapshot:
+        net_settings = NetworkSettings(args.network_settings_file)
+        logging.info("Network settings are:\n {}".format(pprint.pformat(
+            net_settings)))
+        os_version = deploy_settings['deploy_options']['os_version']
+        net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
+        net_env = NetworkEnvironment(net_settings, net_env_file,
+                                     os_version=os_version)
+        net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
+        utils.dump_yaml(dict(net_env), net_env_target)
+
+        # get global deploy params
+        ha_enabled = deploy_settings['global_params']['ha_enabled']
+        introspect = deploy_settings['global_params'].get('introspect', True)
+        net_list = net_settings.enabled_network_list
+        if args.virtual:
+            if args.virt_compute_ram is None:
+                compute_ram = args.virt_default_ram
+            else:
+                compute_ram = args.virt_compute_ram
+            if (deploy_settings['deploy_options']['sdn_controller'] ==
+                    'opendaylight' and args.virt_default_ram < 12):
+                control_ram = 12
+                logging.warning('RAM per controller is too low.  OpenDaylight '
+                                'requires at least 12GB per controller.')
+                logging.info('Increasing RAM per controller to 12GB')
+            elif args.virt_default_ram < 10:
+                if platform.machine() == 'aarch64':
+                    control_ram = 16
+                    logging.warning('RAM per controller is too low for '
+                                    'aarch64')
+                    logging.info('Increasing RAM per controller to 16GB')
+                else:
+                    control_ram = 10
+                    logging.warning('RAM per controller is too low.  nosdn '
+                                    'requires at least 10GB per controller.')
+                    logging.info('Increasing RAM per controller to 10GB')
+            else:
+                control_ram = args.virt_default_ram
+            if platform.machine() == 'aarch64' and args.virt_cpus < 16:
+                vcpus = 16
+                logging.warning('aarch64 requires at least 16 vCPUS per '
+                                'target VM. Increasing to 16.')
+            else:
+                vcpus = args.virt_cpus
+            if ha_enabled and args.virt_compute_nodes < 2:
+                logging.debug(
+                    'HA enabled, bumping number of compute nodes to 2')
+                args.virt_compute_nodes = 2
+            virt_utils.generate_inventory(args.inventory_file, ha_enabled,
+                                          num_computes=args.virt_compute_nodes,
+                                          controller_ram=control_ram * 1024,
+                                          compute_ram=compute_ram * 1024,
+                                          vcpus=vcpus
+                                          )
+        inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
+        logging.info("Inventory is:\n {}".format(pprint.pformat(
+            inventory)))
+
+        validate_cross_settings(deploy_settings, net_settings, inventory)
+    else:
+        # only one network with snapshots
+        net_list = [constants.ADMIN_NETWORK]
+
+    ds_opts = deploy_settings['deploy_options']
+    ansible_args = {
+        'virsh_enabled_networks': net_list,
+        'snapshot': args.snapshot
+    }
+    utils.run_ansible(ansible_args,
+                      os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+                                   'deploy_dependencies.yml'))
+    all_in_one = not bool(args.virt_compute_nodes)
+    if args.snapshot:
+        # Start snapshot Deployment
+        logging.info('Executing Snapshot Deployment...')
+        SnapshotDeployment(deploy_settings=deploy_settings,
+                           snap_cache_dir=args.snap_cache,
+                           fetch=not args.no_fetch,
+                           all_in_one=all_in_one)
     else:
+        # Start Standard TripleO Deployment
+        deployment = ApexDeployment(deploy_settings, args.patches_file,
+                                    args.deploy_settings_file)
         # TODO (trozet): add logic back from:
         # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
-        ansible_args = {
-            'virsh_enabled_networks': net_settings.enabled_network_list
-        }
-        utils.run_ansible(ansible_args,
-                          os.path.join(args.lib_dir, ANSIBLE_PATH,
-                                       'deploy_dependencies.yml'))
         uc_external = False
         if 'external' in net_settings.enabled_network_list:
             uc_external = True
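
From here on every orchestration step funnels through utils.run_ansible
with the same calling convention: a dict of extra variables, a playbook
path, and optionally a target host, user, and temp dir. A rough sketch of
that convention, assuming it shells out to ansible-playbook (the real
implementation lives in apex.common.utils and is not part of this diff):

    import json
    import subprocess

    def run_ansible_sketch(host_vars, playbook, host='localhost',
                           user='root', tmp_dir=None):
        # Mirrors the call sites in deploy.py only; this is NOT the
        # actual apex.common.utils.run_ansible implementation.
        cmd = ['ansible-playbook', '-i', '{},'.format(host), '-u', user,
               '--extra-vars', json.dumps(host_vars), playbook]
        subprocess.check_call(cmd, cwd=tmp_dir)
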
@@ -304,7 +373,6 @@ def main():
                         'members'][0]
                 bridge = "br-{}".format(network)
                 jumphost.attach_interface_to_ovs(bridge, iface, network)
-        # Dump all settings out to temp bash files to be sourced
         instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
         with open(instackenv_json, 'w') as fh:
             json.dump(inventory, fh)
@@ -314,38 +382,172 @@ def main():
             root_pw = constants.DEBUG_OVERCLOUD_PW
         else:
             root_pw = None
+
+        if not args.upstream:
+            logging.warning("Using upstream is now required for Apex. "
+                            "Forcing upstream to true")
+        if os_version == 'master':
+            branch = 'master'
+        else:
+            branch = "stable/{}".format(os_version)
+
+        logging.info("Deploying with upstream artifacts for OpenStack "
+                     "{}".format(os_version))
+        args.image_dir = os.path.join(args.image_dir, os_version)
+        upstream_url = constants.UPSTREAM_RDO.replace(
+            constants.DEFAULT_OS_VERSION, os_version)
+
+        upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
+        if platform.machine() == 'aarch64':
+            upstream_targets.append('undercloud.qcow2')
+        utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
+                                        upstream_targets,
+                                        fetch=not args.no_fetch)
+        # Copy ironic files and overcloud ramdisk and kernel into temp dir
+        # to be copied by ansible into undercloud /home/stack
+        # Note the overcloud disk does not need to be copied here as it will
+        # be modified and copied later
+        for tmp_file in UC_DISK_FILES:
+            shutil.copyfile(os.path.join(args.image_dir, tmp_file),
+                            os.path.join(APEX_TEMP_DIR, tmp_file))
+        if platform.machine() == 'aarch64':
+            sdn_image = os.path.join(args.image_dir, 'undercloud.qcow2')
+        else:
+            sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+        # copy undercloud so we don't taint upstream fetch
+        uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
+        uc_fetch_img = sdn_image
+        shutil.copyfile(uc_fetch_img, uc_image)
+        # prep undercloud with required packages
+        if platform.machine() != 'aarch64':
+            uc_builder.update_repos(image=uc_image,
+                                    branch=branch.replace('stable/', ''))
+        uc_builder.add_upstream_packages(uc_image)
+        uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
+        # add patches from upstream to undercloud and overcloud
+        logging.info('Adding patches to undercloud')
+        patches = deployment.determine_patches()
+        c_builder.add_upstream_patches(patches['undercloud'], uc_image,
+                                       APEX_TEMP_DIR, branch)
+
+        # Create/Start Undercloud VM
         undercloud = uc_lib.Undercloud(args.image_dir,
                                        args.deploy_dir,
                                        root_pw=root_pw,
-                                       external_network=uc_external)
+                                       external_network=uc_external,
+                                       image_name=os.path.basename(uc_image),
+                                       os_version=os_version)
         undercloud.start()
+        undercloud_admin_ip = net_settings['networks'][
+            constants.ADMIN_NETWORK]['installer_vm']['ip']
+
+        if ds_opts['containers']:
+            tag = constants.DOCKER_TAG
+        else:
+            tag = None
 
         # Generate nic templates
         for role in 'compute', 'controller':
             oc_cfg.create_nic_template(net_settings, deploy_settings, role,
                                        args.deploy_dir, APEX_TEMP_DIR)
+        # Prepare/Upload docker images
+        docker_env = 'containers-prepare-parameter.yaml'
+        shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
+                        os.path.join(APEX_TEMP_DIR, docker_env))
+        # Upload extra ansible.cfg
+        if platform.machine() == 'aarch64':
+            ansible_env = 'ansible.cfg'
+            shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
+                            os.path.join(APEX_TEMP_DIR, ansible_env))
+
+        c_builder.prepare_container_images(
+            os.path.join(APEX_TEMP_DIR, docker_env),
+            branch=branch.replace('stable/', ''),
+            neutron_driver=c_builder.get_neutron_driver(ds_opts)
+        )
         # Install Undercloud
-        undercloud.configure(net_settings,
-                             os.path.join(args.lib_dir, ANSIBLE_PATH,
+        undercloud.configure(net_settings, deploy_settings,
+                             os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                           'configure_undercloud.yml'),
-                             APEX_TEMP_DIR)
+                             APEX_TEMP_DIR, virtual_oc=args.virtual)
 
         # Prepare overcloud-full.qcow2
         logging.info("Preparing Overcloud for deployment...")
-        sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
-        overcloud_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
-                                    root_pw=root_pw)
+        if os_version != 'ocata':
+            net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
+            net_data = network_data.create_network_data(net_settings,
+                                                        net_data_file)
+        else:
+            net_data = False
+
+        shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
+                        os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))
+
+        # TODO(trozet): Either fix opnfv env or default to use upstream env
+        if args.env_file == 'opnfv-environment.yaml':
+            # Override the env_file if it is defaulted to opnfv
+            # opnfv env file will not work with upstream
+            args.env_file = 'upstream-environment.yaml'
         opnfv_env = os.path.join(args.deploy_dir, args.env_file)
-        overcloud_deploy.prep_env(deploy_settings, net_settings, opnfv_env,
-                                  net_env_target, APEX_TEMP_DIR)
-        overcloud_deploy.create_deploy_cmd(deploy_settings, net_settings,
-                                           inventory, APEX_TEMP_DIR,
-                                           args.virtual, args.env_file)
-        deploy_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH,
+        oc_deploy.prep_env(deploy_settings, net_settings, inventory,
+                           opnfv_env, net_env_target, APEX_TEMP_DIR)
+        if not args.virtual:
+            oc_deploy.LOOP_DEVICE_SIZE = "50G"
+        if platform.machine() == 'aarch64':
+            oc_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+        else:
+            oc_image = sdn_image
+        patched_containers = oc_deploy.prep_image(
+            deploy_settings, net_settings, oc_image, APEX_TEMP_DIR,
+            root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
+
+        oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
+                                    APEX_TEMP_DIR, args.virtual,
+                                    os.path.basename(opnfv_env),
+                                    net_data=net_data)
+        # Prepare undercloud with containers
+        docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+                                       'prepare_overcloud_containers.yml')
+        if ds_opts['containers']:
+            logging.info("Preparing Undercloud with Docker containers")
+            sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
+            sdn_env_files = ''
+            for sdn_file in sdn_env:
+                sdn_env_files += " -e {}".format(sdn_file)
+            if patched_containers:
+                oc_builder.archive_docker_patches(APEX_TEMP_DIR)
+            container_vars = dict()
+            container_vars['apex_temp_dir'] = APEX_TEMP_DIR
+            container_vars['patched_docker_services'] = list(
+                patched_containers)
+            container_vars['container_tag'] = constants.DOCKER_TAG
+            container_vars['stackrc'] = 'source /home/stack/stackrc'
+            container_vars['sdn'] = ds_opts['sdn_controller']
+            container_vars['undercloud_ip'] = undercloud_admin_ip
+            container_vars['os_version'] = os_version
+            container_vars['aarch64'] = platform.machine() == 'aarch64'
+            container_vars['sdn_env_file'] = sdn_env_files
+            container_vars['container_client'] = utils.find_container_client(
+                os_version)
+            try:
+                utils.run_ansible(container_vars, docker_playbook,
+                                  host=undercloud.ip, user='stack',
+                                  tmp_dir=APEX_TEMP_DIR)
+                logging.info("Container preparation complete")
+            except Exception:
+                logging.error("Unable to complete container prep on "
+                              "Undercloud")
+                for tmp_file in UC_DISK_FILES:
+                    os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
+                os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+                raise
+
+        deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                        'deploy_overcloud.yml')
         virt_env = 'virtual-environment.yaml'
         bm_env = 'baremetal-environment.yaml'
-        for p_env in virt_env, bm_env:
+        k8s_env = 'kubernetes-environment.yaml'
+        for p_env in virt_env, bm_env, k8s_env:
             shutil.copyfile(os.path.join(args.deploy_dir, p_env),
                             os.path.join(APEX_TEMP_DIR, p_env))
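
One detail from the artifact preparation above: the branch string derived
from os_version carries a 'stable/' prefix, which the builder calls strip
off again wherever a bare branch name is needed. Illustratively:

    os_version = 'queens'  # example value
    branch = ('master' if os_version == 'master'
              else 'stable/{}'.format(os_version))
    assert branch == 'stable/queens'
    assert branch.replace('stable/', '') == 'queens'
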
 
@@ -355,10 +557,22 @@ def main():
         deploy_vars['virtual'] = args.virtual
         deploy_vars['debug'] = args.debug
         deploy_vars['aarch64'] = platform.machine() == 'aarch64'
+        deploy_vars['introspect'] = not (args.virtual or
+                                         deploy_vars['aarch64'] or
+                                         not introspect)
         deploy_vars['dns_server_args'] = ''
         deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
+        deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
         deploy_vars['stackrc'] = 'source /home/stack/stackrc'
         deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
+        deploy_vars['undercloud_ip'] = undercloud_admin_ip
+        deploy_vars['ha_enabled'] = ha_enabled
+        deploy_vars['os_version'] = os_version
+        deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
+        deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
+        deploy_vars['vim'] = ds_opts['vim']
+        deploy_vars['container_client'] = utils.find_container_client(
+            os_version)
         for dns_server in net_settings['dns_servers']:
             deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                 dns_server)
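
The introspect value set above reduces, by De Morgan's laws, to: run
introspection only for baremetal, non-aarch64 deployments where the global
introspect option is left enabled. Equivalently:

    deploy_vars['introspect'] = (not args.virtual
                                 and not deploy_vars['aarch64']
                                 and introspect)
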
@@ -367,10 +581,15 @@ def main():
                               user='stack', tmp_dir=APEX_TEMP_DIR)
             logging.info("Overcloud deployment complete")
         except Exception:
-            logging.error("Deployment Failed.  Please check log")
+            logging.error("Deployment Failed.  Please check deploy log as "
+                          "well as mistral logs in "
+                          "{}".format(os.path.join(APEX_TEMP_DIR,
+                                                   'mistral_logs.tar.gz')))
             raise
         finally:
             os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+            for tmp_file in UC_DISK_FILES:
+                os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
 
         # Post install
         logging.info("Executing post deploy configuration")
@@ -383,9 +602,8 @@ def main():
                                      'UserKnownHostsFile=/dev/null -o ' \
                                      'LogLevel=error'
         deploy_vars['external_network_cmds'] = \
-            overcloud_deploy.external_network_cmds(net_settings)
+            oc_deploy.external_network_cmds(net_settings, deploy_settings)
         # TODO(trozet): just parse all ds_opts as deploy vars one time
-        ds_opts = deploy_settings['deploy_options']
         deploy_vars['gluon'] = ds_opts['gluon']
         deploy_vars['sdn'] = ds_opts['sdn_controller']
         for dep_option in 'yardstick', 'dovetail', 'vsperf':
@@ -397,39 +615,134 @@ def main():
         overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
         if ds_opts['congress']:
             deploy_vars['congress_datasources'] = \
-                overcloud_deploy.create_congress_cmds(overcloudrc)
+                oc_deploy.create_congress_cmds(overcloudrc)
             deploy_vars['congress'] = True
         else:
             deploy_vars['congress'] = False
-        # TODO(trozet): this is probably redundant with getting external
-        # network info from undercloud.py
-        if 'external' in net_settings.enabled_network_list:
-            ext_cidr = net_settings['networks']['external'][0]['cidr']
+        deploy_vars['calipso'] = ds_opts.get('calipso', False)
+        deploy_vars['calipso_ip'] = undercloud_admin_ip
+        # overcloudrc.v3 was removed in queens and later (v3 is the default)
+        if os_version == 'pike':
+            deploy_vars['overcloudrc_files'] = ['overcloudrc',
+                                                'overcloudrc.v3']
         else:
-            ext_cidr = net_settings['networks']['admin']['cidr']
-        deploy_vars['external_cidr'] = str(ext_cidr)
-        if ext_cidr.version == 6:
-            deploy_vars['external_network_ipv6'] = True
-        else:
-            deploy_vars['external_network_ipv6'] = False
-        post_undercloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
+            deploy_vars['overcloudrc_files'] = ['overcloudrc']
+
+        post_undercloud = os.path.join(args.lib_dir,
+                                       constants.ANSIBLE_PATH,
                                        'post_deploy_undercloud.yml')
-        logging.info("Executing post deploy configuration undercloud playbook")
+        logging.info("Executing post deploy configuration undercloud "
+                     "playbook")
         try:
-            utils.run_ansible(deploy_vars, post_undercloud, host=undercloud.ip,
-                              user='stack', tmp_dir=APEX_TEMP_DIR)
+            utils.run_ansible(deploy_vars, post_undercloud,
+                              host=undercloud.ip, user='stack',
+                              tmp_dir=APEX_TEMP_DIR)
             logging.info("Post Deploy Undercloud Configuration Complete")
         except Exception:
             logging.error("Post Deploy Undercloud Configuration failed.  "
                           "Please check log")
             raise
+
+        # Deploy Kubernetes if enabled
+        # TODO(zshi): move handling of Kubernetes deployment to its own
+        # deployment class
+        if deploy_vars['vim'] == 'k8s':
+            # clone kubespray repo
+            git.Repo.clone_from(constants.KUBESPRAY_URL,
+                                os.path.join(APEX_TEMP_DIR, 'kubespray'))
+            shutil.copytree(
+                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
+                             'sample'),
+                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
+                             'apex'))
+            k8s_node_inventory = {
+                'all': {
+                    'hosts': {},
+                    'children': {
+                        'k8s-cluster': {
+                            'children': {
+                                'kube-master': {
+                                    'hosts': {}
+                                },
+                                'kube-node': {
+                                    'hosts': {}
+                                }
+                            }
+                        },
+                        'etcd': {
+                            'hosts': {}
+                        }
+                    }
+                }
+            }
+            for node, ip in deploy_vars['overcloud_nodes'].items():
+                k8s_node_inventory['all']['hosts'][node] = {
+                    'ansible_become': True,
+                    'ansible_ssh_host': ip,
+                    'ansible_become_user': 'root',
+                    'ip': ip
+                }
+                if 'controller' in node:
+                    k8s_node_inventory['all']['children']['k8s-cluster'][
+                        'children']['kube-master']['hosts'][node] = None
+                    k8s_node_inventory['all']['children']['etcd'][
+                        'hosts'][node] = None
+                elif 'compute' in node:
+                    k8s_node_inventory['all']['children']['k8s-cluster'][
+                        'children']['kube-node']['hosts'][node] = None
+
+            kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
+            with open(os.path.join(kubespray_dir, 'inventory', 'apex',
+                                   'apex.yaml'), 'w') as invfile:
+                yaml.dump(k8s_node_inventory, invfile,
+                          default_flow_style=False)
+            k8s_deploy_vars = {}
+            # Add kubespray ansible control variables in k8s_deploy_vars,
+            # example: 'kube_network_plugin': 'flannel'
+            k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
+            k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
+                                               'apex', 'apex.yaml')
+
+            k8s_remove_pkgs = os.path.join(args.lib_dir,
+                                           constants.ANSIBLE_PATH,
+                                           'k8s_remove_pkgs.yml')
+            try:
+                logging.debug("Removing any existing overcloud docker "
+                              "packages")
+                utils.run_ansible(k8s_deploy_vars, k8s_remove_pkgs,
+                                  host=k8s_deploy_inv_file,
+                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+                logging.info("k8s Deploy Remove Existing Docker Related "
+                             "Packages Complete")
+            except Exception:
+                logging.error("k8s Deploy Remove Existing Docker Related "
+                              "Packages failed. Please check log")
+                raise
+
+            try:
+                utils.run_ansible(k8s_deploy_vars, k8s_deploy,
+                                  host=k8s_deploy_inv_file,
+                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+                logging.info("k8s Deploy Overcloud Configuration Complete")
+            except Exception:
+                logging.error("k8s Deploy Overcloud Configuration failed."
+                              "Please check log")
+                raise
+
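
For a one-controller, one-compute deployment, the apex.yaml inventory
dumped above comes out roughly as follows (node names and IPs are
illustrative; yaml.dump sorts keys alphabetically):

    all:
      children:
        etcd:
          hosts:
            overcloud-controller-0: null
        k8s-cluster:
          children:
            kube-master:
              hosts:
                overcloud-controller-0: null
            kube-node:
              hosts:
                overcloud-novacompute-0: null
      hosts:
        overcloud-controller-0:
          ansible_become: true
          ansible_become_user: root
          ansible_ssh_host: 192.0.2.10
          ip: 192.0.2.10
        overcloud-novacompute-0:
          ansible_become: true
          ansible_become_user: root
          ansible_ssh_host: 192.0.2.11
          ip: 192.0.2.11
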
         # Post deploy overcloud node configuration
         # TODO(trozet): just parse all ds_opts as deploy vars one time
         deploy_vars['sfc'] = ds_opts['sfc']
         deploy_vars['vpn'] = ds_opts['vpn']
+        deploy_vars['l2gw'] = ds_opts.get('l2gw')
+        deploy_vars['sriov'] = ds_opts.get('sriov')
+        deploy_vars['tacker'] = ds_opts.get('tacker')
+        deploy_vars['all_in_one'] = all_in_one
+        # TODO(trozet): need to set container client to docker until OOO
+        # migrates OC to podman. Remove this later.
+        deploy_vars['container_client'] = 'docker'
         # TODO(trozet): pull all logs and store in tmp dir in overcloud
         # playbook
-        post_overcloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
+        post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'post_deploy_overcloud.yml')
         # Run per overcloud node
         for node, ip in deploy_vars['overcloud_nodes'].items():
@@ -448,5 +761,7 @@ def main():
         logging.info("Undercloud IP: {}, please connect by doing "
                      "'opnfv-util undercloud'".format(undercloud.ip))
         # TODO(trozet): add logging here showing controller VIP and horizon url
+
+
 if __name__ == '__main__':
     main()