Remove downloading undercloud.qcow2
diff --git a/apex/deploy.py b/apex/deploy.py
index 5485d15..b74d529 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -10,6 +10,7 @@
 ##############################################################################
 
 import argparse
+import git
 import json
 import logging
 import os
@@ -18,20 +19,23 @@ import pprint
 import shutil
 import sys
 import tempfile
+import yaml
 
 import apex.virtual.configure_vm as vm_lib
 import apex.virtual.utils as virt_utils
+import apex.builders.common_builder as c_builder
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.undercloud_builder as uc_builder
 from apex import DeploySettings
 from apex import Inventory
 from apex import NetworkEnvironment
 from apex import NetworkSettings
-from apex.builders import common_builder as c_builder
-from apex.builders import overcloud_builder as oc_builder
-from apex.builders import undercloud_builder as uc_builder
+from apex.deployment.snapshot import SnapshotDeployment
 from apex.common import utils
 from apex.common import constants
 from apex.common import parsers
 from apex.common.exceptions import ApexDeployException
+from apex.deployment.tripleo import ApexDeployment
 from apex.network import jumphost
 from apex.network import network_data
 from apex.undercloud import undercloud as uc_lib
@@ -39,13 +43,13 @@ from apex.overcloud import config as oc_cfg
 from apex.overcloud import deploy as oc_deploy
 
 APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
-ANSIBLE_PATH = 'ansible/playbooks'
 SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
-
-
-def deploy_quickstart(args, deploy_settings_file, network_settings_file,
-                      inventory_file=None):
-    pass
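+# Disk artifacts extracted from the upstream tarballs; copied by Ansible into
+# the undercloud's /home/stack during deployment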
+UC_DISK_FILES = [
+    'overcloud-full.vmlinuz',
+    'overcloud-full.initrd',
+    'ironic-python-agent.initramfs',
+    'ironic-python-agent.kernel'
+]
 
 
 def validate_cross_settings(deploy_settings, net_settings, inventory):
@@ -113,7 +117,7 @@ def create_deploy_parser():
                                help='File which contains Apex deploy settings')
     deploy_parser.add_argument('-n', '--network-settings',
                                dest='network_settings_file',
-                               required=True,
+                               required=False,
                                help='File which contains Apex network '
                                     'settings')
     deploy_parser.add_argument('-i', '--inventory-file',
@@ -174,13 +178,29 @@ def create_deploy_parser():
                                default='/usr/share/opnfv-apex',
                                help='Directory path for apex ansible '
                                     'and third party libs')
-    deploy_parser.add_argument('--quickstart', action='store_true',
+    deploy_parser.add_argument('-s', '--snapshot', action='store_true',
                                default=False,
-                               help='Use tripleo-quickstart to deploy')
+                               help='Use snapshots for deployment')
+    deploy_parser.add_argument('--snap-cache', dest='snap_cache',
+                               default="{}/snap_cache".format(
+                                   os.path.expanduser('~')),
+                               help='Local directory to cache snapshot '
+                                    'artifacts. Defaults to $HOME/snap_cache')
     deploy_parser.add_argument('--upstream', action='store_true',
-                               default=False,
+                               default=True,
                                help='Force deployment to use upstream '
-                                    'artifacts')
+                                    'artifacts. This option is now '
+                                    'deprecated and only upstream '
+                                    'deployments are supported.')
+    deploy_parser.add_argument('--no-fetch', action='store_true',
+                               default=False,
+                               help='Ignore fetching latest upstream and '
+                                    'use what is in cache')
+    deploy_parser.add_argument('-p', '--patches',
+                               default='/etc/opnfv-apex/common-patches.yaml',
+                               dest='patches_file',
+                               help='File to include for common patches '
+                                    'which apply to all deployment scenarios')
     return deploy_parser
 
 
@@ -192,20 +212,25 @@ def validate_deploy_args(args):
     """
 
     logging.debug('Validating arguments for deployment')
     """
 
     logging.debug('Validating arguments for deployment')
-    if args.virtual and args.inventory_file is not None:
+    if args.snapshot:
+        logging.debug('Skipping inventory validation as it is not applicable '
+                      'to snapshot deployments')
+    elif args.virtual and args.inventory_file is not None:
         logging.error("Virtual enabled but inventory file also given")
         raise ApexDeployException('You should not specify an inventory file '
                                   'with virtual deployments')
     elif args.virtual:
         args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                            'inventory-virt.yaml')
         logging.error("Virtual enabled but inventory file also given")
         raise ApexDeployException('You should not specify an inventory file '
                                   'with virtual deployments')
     elif args.virtual:
         args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                            'inventory-virt.yaml')
-    elif os.path.isfile(args.inventory_file) is False:
+    elif not os.path.isfile(args.inventory_file):
         logging.error("Specified inventory file does not exist: {}".format(
             args.inventory_file))
         raise ApexDeployException('Specified inventory file does not exist')
 
     for settings_file in (args.deploy_settings_file,
                           args.network_settings_file):
         logging.error("Specified inventory file does not exist: {}".format(
             args.inventory_file))
         raise ApexDeployException('Specified inventory file does not exist')
 
     for settings_file in (args.deploy_settings_file,
                           args.network_settings_file):
+        if settings_file == args.network_settings_file and args.snapshot:
+            continue
         if os.path.isfile(settings_file) is False:
             logging.error("Specified settings file does not "
                           "exist: {}".format(settings_file))
@@ -234,74 +259,86 @@ def main():
     console.setLevel(log_level)
     console.setFormatter(logging.Formatter(formatter))
     logging.getLogger('').addHandler(console)
+    utils.install_ansible()
     validate_deploy_args(args)
     # Parse all settings
     deploy_settings = DeploySettings(args.deploy_settings_file)
     logging.info("Deploy settings are:\n {}".format(pprint.pformat(
-                 deploy_settings)))
-    net_settings = NetworkSettings(args.network_settings_file)
-    logging.info("Network settings are:\n {}".format(pprint.pformat(
-                 net_settings)))
-    os_version = deploy_settings['deploy_options']['os_version']
-    net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
-    net_env = NetworkEnvironment(net_settings, net_env_file,
-                                 os_version=os_version)
-    net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
-    utils.dump_yaml(dict(net_env), net_env_target)
-    ha_enabled = deploy_settings['global_params']['ha_enabled']
-    if args.virtual:
-        if args.virt_compute_ram is None:
-            compute_ram = args.virt_default_ram
-        else:
-            compute_ram = args.virt_compute_ram
-        if deploy_settings['deploy_options']['sdn_controller'] == \
-                'opendaylight' and args.virt_default_ram < 12:
-            control_ram = 12
-            logging.warning('RAM per controller is too low.  OpenDaylight '
-                            'requires at least 12GB per controller.')
-            logging.info('Increasing RAM per controller to 12GB')
-        elif args.virt_default_ram < 10:
-            control_ram = 10
-            logging.warning('RAM per controller is too low.  nosdn '
-                            'requires at least 10GB per controller.')
-            logging.info('Increasing RAM per controller to 10GB')
-        else:
-            control_ram = args.virt_default_ram
-        if ha_enabled and args.virt_compute_nodes < 2:
-            logging.debug('HA enabled, bumping number of compute nodes to 2')
-            args.virt_compute_nodes = 2
-        virt_utils.generate_inventory(args.inventory_file, ha_enabled,
-                                      num_computes=args.virt_compute_nodes,
-                                      controller_ram=control_ram * 1024,
-                                      compute_ram=compute_ram * 1024,
-                                      vcpus=args.virt_cpus
-                                      )
-    inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
-
-    validate_cross_settings(deploy_settings, net_settings, inventory)
+        deploy_settings)))
+
+    if not args.snapshot:
+        net_settings = NetworkSettings(args.network_settings_file)
+        logging.info("Network settings are:\n {}".format(pprint.pformat(
+            net_settings)))
+        os_version = deploy_settings['deploy_options']['os_version']
+        net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
+        net_env = NetworkEnvironment(net_settings, net_env_file,
+                                     os_version=os_version)
+        net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
+        utils.dump_yaml(dict(net_env), net_env_target)
+
+        # get global deploy params
+        ha_enabled = deploy_settings['global_params']['ha_enabled']
+        introspect = deploy_settings['global_params'].get('introspect', True)
+        net_list = net_settings.enabled_network_list
+        if args.virtual:
+            if args.virt_compute_ram is None:
+                compute_ram = args.virt_default_ram
+            else:
+                compute_ram = args.virt_compute_ram
+            if (deploy_settings['deploy_options']['sdn_controller'] ==
+                    'opendaylight' and args.virt_default_ram < 12):
+                control_ram = 12
+                logging.warning('RAM per controller is too low.  OpenDaylight '
+                                'requires at least 12GB per controller.')
+                logging.info('Increasing RAM per controller to 12GB')
+            elif args.virt_default_ram < 10:
+                control_ram = 10
+                logging.warning('RAM per controller is too low.  nosdn '
+                                'requires at least 10GB per controller.')
+                logging.info('Increasing RAM per controller to 10GB')
+            else:
+                control_ram = args.virt_default_ram
+            if ha_enabled and args.virt_compute_nodes < 2:
+                logging.debug(
+                    'HA enabled, bumping number of compute nodes to 2')
+                args.virt_compute_nodes = 2
+            virt_utils.generate_inventory(args.inventory_file, ha_enabled,
+                                          num_computes=args.virt_compute_nodes,
+                                          controller_ram=control_ram * 1024,
+                                          compute_ram=compute_ram * 1024,
+                                          vcpus=args.virt_cpus
+                                          )
+        inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
+        logging.info("Inventory is:\n {}".format(pprint.pformat(
+            inventory)))
+
+        validate_cross_settings(deploy_settings, net_settings, inventory)
+    else:
+        # only one network with snapshots
+        net_list = [constants.ADMIN_NETWORK]
+
     ds_opts = deploy_settings['deploy_options']
-    if args.quickstart:
-        deploy_settings_file = os.path.join(APEX_TEMP_DIR,
-                                            'apex_deploy_settings.yaml')
-        utils.dump_yaml(utils.dict_objects_to_str(deploy_settings),
-                        deploy_settings_file)
-        logging.info("File created: {}".format(deploy_settings_file))
-        network_settings_file = os.path.join(APEX_TEMP_DIR,
-                                             'apex_network_settings.yaml')
-        utils.dump_yaml(utils.dict_objects_to_str(net_settings),
-                        network_settings_file)
-        logging.info("File created: {}".format(network_settings_file))
-        deploy_quickstart(args, deploy_settings_file, network_settings_file,
-                          args.inventory_file)
+    ansible_args = {
+        'virsh_enabled_networks': net_list,
+        'snapshot': args.snapshot
+    }
+    utils.run_ansible(ansible_args,
+                      os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+                                   'deploy_dependencies.yml'))
+    if args.snapshot:
+        # Start snapshot Deployment
+        logging.info('Executing Snapshot Deployment...')
+        SnapshotDeployment(deploy_settings=deploy_settings,
+                           snap_cache_dir=args.snap_cache,
+                           fetch=not args.no_fetch,
+                           all_in_one=not bool(args.virt_compute_nodes))
     else:
+        # Start Standard TripleO Deployment
+        deployment = ApexDeployment(deploy_settings, args.patches_file,
+                                    args.deploy_settings_file)
         # TODO (trozet): add logic back from:
         # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
-        ansible_args = {
-            'virsh_enabled_networks': net_settings.enabled_network_list
-        }
-        utils.run_ansible(ansible_args,
-                          os.path.join(args.lib_dir, ANSIBLE_PATH,
-                                       'deploy_dependencies.yml'))
         uc_external = False
         if 'external' in net_settings.enabled_network_list:
             uc_external = True
@@ -333,62 +370,81 @@ def main():
         else:
             root_pw = None
 
-        upstream = (os_version != constants.DEFAULT_OS_VERSION or
-                    args.upstream)
+        if not args.upstream:
+            logging.warning("Using upstream is now required for Apex. "
+                            "Forcing upstream to true")
         if os_version == 'master':
             branch = 'master'
         else:
             branch = "stable/{}".format(os_version)
-        if upstream:
-            logging.info("Deploying with upstream artifacts for OpenStack "
-                         "{}".format(os_version))
-            args.image_dir = os.path.join(args.image_dir, os_version)
-            upstream_url = constants.UPSTREAM_RDO.replace(
-                constants.DEFAULT_OS_VERSION, os_version)
-            upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
-            utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
-                                            upstream_targets)
-            sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
-            if ds_opts['sdn_controller'] == 'opendaylight':
-                logging.info("Preparing upstream image with OpenDaylight")
-                oc_builder.inject_opendaylight(
-                    odl_version=ds_opts['odl_version'],
-                    image=sdn_image,
-                    tmp_dir=APEX_TEMP_DIR
-                )
-            # copy undercloud so we don't taint upstream fetch
-            uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
-            uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
-            shutil.copyfile(uc_fetch_img, uc_image)
-            # prep undercloud with required packages
-            uc_builder.add_upstream_packages(uc_image)
-            # add patches from upstream to undercloud and overcloud
-            logging.info('Adding patches to undercloud')
-            patches = deploy_settings['global_params']['patches']
-            c_builder.add_upstream_patches(patches['undercloud'], uc_image,
-                                           APEX_TEMP_DIR, branch)
-            logging.info('Adding patches to overcloud')
-            c_builder.add_upstream_patches(patches['overcloud'], sdn_image,
-                                           APEX_TEMP_DIR, branch)
-        else:
-            sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
-            uc_image = 'undercloud.qcow2'
+
+        logging.info("Deploying with upstream artifacts for OpenStack "
+                     "{}".format(os_version))
+        args.image_dir = os.path.join(args.image_dir, os_version)
+        upstream_url = constants.UPSTREAM_RDO.replace(
+            constants.DEFAULT_OS_VERSION, os_version)
+        upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
+        utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
+                                        upstream_targets,
+                                        fetch=not args.no_fetch)
+        # Copy ironic files and overcloud ramdisk and kernel into temp dir
+        # to be copied by ansible into undercloud /home/stack
+        # Note the overcloud disk does not need to be copied here as it will
+        # be modified and copied later
+        for tmp_file in UC_DISK_FILES:
+            shutil.copyfile(os.path.join(args.image_dir, tmp_file),
+                            os.path.join(APEX_TEMP_DIR, tmp_file))
+        sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+        # copy undercloud so we don't taint upstream fetch
+        uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
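+        # undercloud.qcow2 is no longer downloaded; the undercloud disk is
+        # now built from the fetched overcloud-full image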
+        uc_fetch_img = sdn_image
+        shutil.copyfile(uc_fetch_img, uc_image)
+        # prep undercloud with required packages
+        if platform.machine() != 'aarch64':
+            uc_builder.update_repos(image=uc_image,
+                                    branch=branch.replace('stable/', ''))
+        uc_builder.add_upstream_packages(uc_image)
+        uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
+        # add patches from upstream to undercloud and overcloud
+        logging.info('Adding patches to undercloud')
+        patches = deployment.determine_patches()
+        c_builder.add_upstream_patches(patches['undercloud'], uc_image,
+                                       APEX_TEMP_DIR, branch)
+
+        # Create/Start Undercloud VM
         undercloud = uc_lib.Undercloud(args.image_dir,
                                        args.deploy_dir,
                                        root_pw=root_pw,
                                        external_network=uc_external,
-                                       image_name=os.path.basename(uc_image))
+                                       image_name=os.path.basename(uc_image),
+                                       os_version=os_version)
         undercloud.start()
+        undercloud_admin_ip = net_settings['networks'][
+            constants.ADMIN_NETWORK]['installer_vm']['ip']
+
+        if ds_opts['containers']:
+            tag = constants.DOCKER_TAG
+        else:
+            tag = None
 
         # Generate nic templates
         for role in 'compute', 'controller':
             oc_cfg.create_nic_template(net_settings, deploy_settings, role,
                                        args.deploy_dir, APEX_TEMP_DIR)
+        # Prepare/Upload docker images
+        docker_env = 'containers-prepare-parameter.yaml'
+        shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
+                        os.path.join(APEX_TEMP_DIR, docker_env))
+        c_builder.prepare_container_images(
+            os.path.join(APEX_TEMP_DIR, docker_env),
+            branch=branch.replace('stable/', ''),
+            neutron_driver=c_builder.get_neutron_driver(ds_opts)
+        )
         # Install Undercloud
-        undercloud.configure(net_settings,
-                             os.path.join(args.lib_dir, ANSIBLE_PATH,
+        undercloud.configure(net_settings, deploy_settings,
+                             os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                           'configure_undercloud.yml'),
-                             APEX_TEMP_DIR)
+                             APEX_TEMP_DIR, virtual_oc=args.virtual)
 
         # Prepare overcloud-full.qcow2
         logging.info("Preparing Overcloud for deployment...")
@@ -398,33 +454,69 @@ def main():
                                                         net_data_file)
         else:
             net_data = False
-        if upstream and args.env_file == 'opnfv-environment.yaml':
+
+        shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
+                        os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))
+
+        # TODO(trozet): Either fix opnfv env or default to use upstream env
+        if args.env_file == 'opnfv-environment.yaml':
             # Override the env_file if it is defaulted to opnfv
             # opnfv env file will not work with upstream
             args.env_file = 'upstream-environment.yaml'
         opnfv_env = os.path.join(args.deploy_dir, args.env_file)
-        if not upstream:
-            oc_deploy.prep_env(deploy_settings, net_settings, inventory,
-                               opnfv_env, net_env_target, APEX_TEMP_DIR)
-            oc_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
-                                 root_pw=root_pw)
-        else:
-            shutil.copyfile(sdn_image, os.path.join(APEX_TEMP_DIR,
-                                                    'overcloud-full.qcow2'))
-            shutil.copyfile(
-                opnfv_env,
-                os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env))
-            )
+        oc_deploy.prep_env(deploy_settings, net_settings, inventory,
+                           opnfv_env, net_env_target, APEX_TEMP_DIR)
+        if not args.virtual:
+            oc_deploy.LOOP_DEVICE_SIZE = "50G"
+        patched_containers = oc_deploy.prep_image(
+            deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
+            root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
 
         oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
                                     APEX_TEMP_DIR, args.virtual,
                                     os.path.basename(opnfv_env),
                                     net_data=net_data)
-        deploy_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH,
+        # Prepare undercloud with containers
+        docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+                                       'prepare_overcloud_containers.yml')
+        if ds_opts['containers']:
+            logging.info("Preparing Undercloud with Docker containers")
+            sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
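+            # build '-e <env file>' arguments for the SDN-specific environment
+            # files consumed by the container prep playbook below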
+            sdn_env_files = str()
+            for sdn_file in sdn_env:
+                sdn_env_files += " -e {}".format(sdn_file)
+            if patched_containers:
+                oc_builder.archive_docker_patches(APEX_TEMP_DIR)
+            container_vars = dict()
+            container_vars['apex_temp_dir'] = APEX_TEMP_DIR
+            container_vars['patched_docker_services'] = list(
+                patched_containers)
+            container_vars['container_tag'] = constants.DOCKER_TAG
+            container_vars['stackrc'] = 'source /home/stack/stackrc'
+            container_vars['sdn'] = ds_opts['sdn_controller']
+            container_vars['undercloud_ip'] = undercloud_admin_ip
+            container_vars['os_version'] = os_version
+            container_vars['aarch64'] = platform.machine() == 'aarch64'
+            container_vars['sdn_env_file'] = sdn_env_files
+            try:
+                utils.run_ansible(container_vars, docker_playbook,
+                                  host=undercloud.ip, user='stack',
+                                  tmp_dir=APEX_TEMP_DIR)
+                logging.info("Container preparation complete")
+            except Exception:
+                logging.error("Unable to complete container prep on "
+                              "Undercloud")
+                for tmp_file in UC_DISK_FILES:
+                    os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
+                os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+                raise
+
+        deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                        'deploy_overcloud.yml')
         virt_env = 'virtual-environment.yaml'
         bm_env = 'baremetal-environment.yaml'
-        for p_env in virt_env, bm_env:
+        k8s_env = 'kubernetes-environment.yaml'
+        for p_env in virt_env, bm_env, k8s_env:
             shutil.copyfile(os.path.join(args.deploy_dir, p_env),
                             os.path.join(APEX_TEMP_DIR, p_env))
 
@@ -434,13 +526,20 @@ def main():
         deploy_vars['virtual'] = args.virtual
         deploy_vars['debug'] = args.debug
         deploy_vars['aarch64'] = platform.machine() == 'aarch64'
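+        # introspection only runs on baremetal, non-aarch64 deployments and
+        # may be disabled via the 'introspect' global param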
+        deploy_vars['introspect'] = not (args.virtual or
+                                         deploy_vars['aarch64'] or
+                                         not introspect)
         deploy_vars['dns_server_args'] = ''
         deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
         deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
         deploy_vars['stackrc'] = 'source /home/stack/stackrc'
         deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
-        deploy_vars['upstream'] = upstream
+        deploy_vars['undercloud_ip'] = undercloud_admin_ip
+        deploy_vars['ha_enabled'] = ha_enabled
         deploy_vars['os_version'] = os_version
+        deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
+        deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
+        deploy_vars['vim'] = ds_opts['vim']
         for dns_server in net_settings['dns_servers']:
             deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                 dns_server)
@@ -449,10 +548,15 @@ def main():
                               user='stack', tmp_dir=APEX_TEMP_DIR)
             logging.info("Overcloud deployment complete")
         except Exception:
-            logging.error("Deployment Failed.  Please check log")
+            logging.error("Deployment Failed.  Please check deploy log as "
+                          "well as mistral logs in "
+                          "{}".format(os.path.join(APEX_TEMP_DIR,
+                                                   'mistral_logs.tar.gz')))
             raise
         finally:
             os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+            for tmp_file in UC_DISK_FILES:
+                os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
 
         # Post install
         logging.info("Executing post deploy configuration")
@@ -465,7 +569,7 @@ def main():
                                      'UserKnownHostsFile=/dev/null -o ' \
                                      'LogLevel=error'
         deploy_vars['external_network_cmds'] = \
-            oc_deploy.external_network_cmds(net_settings)
+            oc_deploy.external_network_cmds(net_settings, deploy_settings)
         # TODO(trozet): just parse all ds_opts as deploy vars one time
         deploy_vars['gluon'] = ds_opts['gluon']
         deploy_vars['sdn'] = ds_opts['sdn_controller']
@@ -483,37 +587,125 @@ def main():
         else:
             deploy_vars['congress'] = False
         deploy_vars['calipso'] = ds_opts.get('calipso', False)
-        deploy_vars['calipso_ip'] = net_settings['networks']['admin'][
-            'installer_vm']['ip']
-        # TODO(trozet): this is probably redundant with getting external
-        # network info from undercloud.py
-        if 'external' in net_settings.enabled_network_list:
-            ext_cidr = net_settings['networks']['external'][0]['cidr']
+        deploy_vars['calipso_ip'] = undercloud_admin_ip
+        # overcloudrc.v3 removed and set as default in queens and later
+        if os_version == 'pike':
+            deploy_vars['overcloudrc_files'] = ['overcloudrc',
+                                                'overcloudrc.v3']
         else:
-            ext_cidr = net_settings['networks']['admin']['cidr']
-        deploy_vars['external_cidr'] = str(ext_cidr)
-        if ext_cidr.version == 6:
-            deploy_vars['external_network_ipv6'] = True
-        else:
-            deploy_vars['external_network_ipv6'] = False
-        post_undercloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
+            deploy_vars['overcloudrc_files'] = ['overcloudrc']
+
+        post_undercloud = os.path.join(args.lib_dir,
+                                       constants.ANSIBLE_PATH,
                                        'post_deploy_undercloud.yml')
                                        'post_deploy_undercloud.yml')
-        logging.info("Executing post deploy configuration undercloud playbook")
+        logging.info("Executing post deploy configuration undercloud "
+                     "playbook")
         try:
         try:
-            utils.run_ansible(deploy_vars, post_undercloud, host=undercloud.ip,
-                              user='stack', tmp_dir=APEX_TEMP_DIR)
+            utils.run_ansible(deploy_vars, post_undercloud,
+                              host=undercloud.ip, user='stack',
+                              tmp_dir=APEX_TEMP_DIR)
             logging.info("Post Deploy Undercloud Configuration Complete")
         except Exception:
             logging.error("Post Deploy Undercloud Configuration failed.  "
                           "Please check log")
             raise
             logging.info("Post Deploy Undercloud Configuration Complete")
         except Exception:
             logging.error("Post Deploy Undercloud Configuration failed.  "
                           "Please check log")
             raise
+
+        # Deploy kubernetes if enabled
+        # (TODO)zshi move handling of kubernetes deployment
+        # to its own deployment class
+        if deploy_vars['vim'] == 'k8s':
+            # clone kubespray repo
+            git.Repo.clone_from(constants.KUBESPRAY_URL,
+                                os.path.join(APEX_TEMP_DIR, 'kubespray'))
+            shutil.copytree(
+                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
+                             'sample'),
+                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
+                             'apex'))
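+            # build the kubespray inventory: controllers map to kube-master
+            # and etcd, computes map to kube-node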
+            k8s_node_inventory = {
+                'all':
+                    {'hosts': {},
+                     'children': {
+                         'k8s-cluster': {
+                             'children': {
+                                 'kube-master': {
+                                     'hosts': {}
+                                 },
+                                 'kube-node': {
+                                     'hosts': {}
+                                 }
+                             }
+                         },
+                         'etcd': {
+                             'hosts': {}
+                         }
+                    }
+                    }
+            }
+            for node, ip in deploy_vars['overcloud_nodes'].items():
+                k8s_node_inventory['all']['hosts'][node] = {
+                    'ansible_become': True,
+                    'ansible_ssh_host': ip,
+                    'ansible_become_user': 'root',
+                    'ip': ip
+                }
+                if 'controller' in node:
+                    k8s_node_inventory['all']['children']['k8s-cluster'][
+                        'children']['kube-master']['hosts'][node] = None
+                    k8s_node_inventory['all']['children']['etcd'][
+                        'hosts'][node] = None
+                elif 'compute' in node:
+                    k8s_node_inventory['all']['children']['k8s-cluster'][
+                        'children']['kube-node']['hosts'][node] = None
+
+            kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
+            with open(os.path.join(kubespray_dir, 'inventory', 'apex',
+                                   'apex.yaml'), 'w') as invfile:
+                yaml.dump(k8s_node_inventory, invfile,
+                          default_flow_style=False)
+            k8s_deploy_vars = {}
+            # Add kubespray ansible control variables in k8s_deploy_vars,
+            # example: 'kube_network_plugin': 'flannel'
+            k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
+            k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
+                                               'apex', 'apex.yaml')
+
+            k8s_remove_pkgs = os.path.join(args.lib_dir,
+                                           constants.ANSIBLE_PATH,
+                                           'k8s_remove_pkgs.yml')
+            try:
+                logging.debug("Removing any existing overcloud docker "
+                              "packages")
+                utils.run_ansible(k8s_deploy_vars, k8s_remove_pkgs,
+                                  host=k8s_deploy_inv_file,
+                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+                logging.info("k8s Deploy Remove Existing Docker Related "
+                             "Packages Complete")
+            except Exception:
+                logging.error("k8s Deploy Remove Existing Docker Related "
+                              "Packages failed. Please check log")
+                raise
+
+            try:
+                utils.run_ansible(k8s_deploy_vars, k8s_deploy,
+                                  host=k8s_deploy_inv_file,
+                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+                logging.info("k8s Deploy Overcloud Configuration Complete")
+            except Exception:
+                logging.error("k8s Deploy Overcloud Configuration failed."
+                              "Please check log")
+                raise
+
         # Post deploy overcloud node configuration
         # TODO(trozet): just parse all ds_opts as deploy vars one time
         deploy_vars['sfc'] = ds_opts['sfc']
         deploy_vars['vpn'] = ds_opts['vpn']
+        deploy_vars['l2gw'] = ds_opts.get('l2gw')
+        deploy_vars['sriov'] = ds_opts.get('sriov')
+        deploy_vars['tacker'] = ds_opts.get('tacker')
         # TODO(trozet): pull all logs and store in tmp dir in overcloud
         # playbook
-        post_overcloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
+        post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                       'post_deploy_overcloud.yml')
         # Run per overcloud node
         for node, ip in deploy_vars['overcloud_nodes'].items():