Fixes undercloud install failure with setting hostname
[apex.git] / apex / deploy.py
index cbecee8..d0c2b20 100644 (file)
@@ -10,6 +10,7 @@
 ##############################################################################
 
 import argparse
+import git
 import json
 import logging
 import os
@@ -18,6 +19,7 @@ import pprint
 import shutil
 import sys
 import tempfile
+import yaml
 
 import apex.virtual.configure_vm as vm_lib
 import apex.virtual.utils as virt_utils
@@ -28,10 +30,12 @@ from apex import DeploySettings
 from apex import Inventory
 from apex import NetworkEnvironment
 from apex import NetworkSettings
+from apex.deployment.snapshot import SnapshotDeployment
 from apex.common import utils
 from apex.common import constants
 from apex.common import parsers
 from apex.common.exceptions import ApexDeployException
+from apex.deployment.tripleo import ApexDeployment
 from apex.network import jumphost
 from apex.network import network_data
 from apex.undercloud import undercloud as uc_lib
@@ -40,11 +44,12 @@ from apex.overcloud import deploy as oc_deploy
 
 APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
 SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
-
-
-def deploy_quickstart(args, deploy_settings_file, network_settings_file,
-                      inventory_file=None):
-    pass
+UC_DISK_FILES = [
+    'overcloud-full.vmlinuz',
+    'overcloud-full.initrd',
+    'ironic-python-agent.initramfs',
+    'ironic-python-agent.kernel'
+]
 
 
 def validate_cross_settings(deploy_settings, net_settings, inventory):
@@ -112,7 +117,7 @@ def create_deploy_parser():
                                help='File which contains Apex deploy settings')
     deploy_parser.add_argument('-n', '--network-settings',
                                dest='network_settings_file',
-                               required=True,
+                               required=False,
                                help='File which contains Apex network '
                                     'settings')
     deploy_parser.add_argument('-i', '--inventory-file',
@@ -173,17 +178,29 @@ def create_deploy_parser():
                                default='/usr/share/opnfv-apex',
                                help='Directory path for apex ansible '
                                     'and third party libs')
-    deploy_parser.add_argument('--quickstart', action='store_true',
+    deploy_parser.add_argument('-s', '--snapshot', action='store_true',
                                default=False,
-                               help='Use tripleo-quickstart to deploy')
+                               help='Use snapshots for deployment')
+    deploy_parser.add_argument('--snap-cache', dest='snap_cache',
+                               default="{}/snap_cache".format(
+                                   os.path.expanduser('~')),
+                               help='Local directory to cache snapshot '
+                                    'artifacts. Defaults to $HOME/snap_cache')
     deploy_parser.add_argument('--upstream', action='store_true',
-                               default=False,
+                               default=True,
                                help='Force deployment to use upstream '
-                                    'artifacts')
+                                    'artifacts. This option is now '
+                                    'deprecated and only upstream '
+                                    'deployments are supported.')
     deploy_parser.add_argument('--no-fetch', action='store_true',
                                default=False,
                                help='Ignore fetching latest upstream and '
                                     'use what is in cache')
+    deploy_parser.add_argument('-p', '--patches',
+                               default='/etc/opnfv-apex/common-patches.yaml',
+                               dest='patches_file',
+                               help='File to include for common patches '
+                                    'which apply to all deployment scenarios')
     return deploy_parser
 
 
@@ -195,20 +212,25 @@ def validate_deploy_args(args):
     """
 
     logging.debug('Validating arguments for deployment')
-    if args.virtual and args.inventory_file is not None:
+    if args.snapshot:
+        logging.debug('Skipping inventory validation as it is not applicable'
+                      ' to snapshot deployments')
+    elif args.virtual and args.inventory_file is not None:
         logging.error("Virtual enabled but inventory file also given")
         raise ApexDeployException('You should not specify an inventory file '
                                   'with virtual deployments')
     elif args.virtual:
         args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                            'inventory-virt.yaml')
-    elif os.path.isfile(args.inventory_file) is False:
+    elif not os.path.isfile(args.inventory_file):
         logging.error("Specified inventory file does not exist: {}".format(
             args.inventory_file))
         raise ApexDeployException('Specified inventory file does not exist')
 
     for settings_file in (args.deploy_settings_file,
                           args.network_settings_file):
+        if settings_file == args.network_settings_file and args.snapshot:
+            continue
         if os.path.isfile(settings_file) is False:
             logging.error("Specified settings file does not "
                           "exist: {}".format(settings_file))
@@ -242,74 +264,94 @@ def main():
     # Parse all settings
     deploy_settings = DeploySettings(args.deploy_settings_file)
     logging.info("Deploy settings are:\n {}".format(pprint.pformat(
-                 deploy_settings)))
-    net_settings = NetworkSettings(args.network_settings_file)
-    logging.info("Network settings are:\n {}".format(pprint.pformat(
-                 net_settings)))
-    os_version = deploy_settings['deploy_options']['os_version']
-    net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
-    net_env = NetworkEnvironment(net_settings, net_env_file,
-                                 os_version=os_version)
-    net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
-    utils.dump_yaml(dict(net_env), net_env_target)
-
-    # get global deploy params
-    ha_enabled = deploy_settings['global_params']['ha_enabled']
-    introspect = deploy_settings['global_params'].get('introspect', True)
-
-    if args.virtual:
-        if args.virt_compute_ram is None:
-            compute_ram = args.virt_default_ram
-        else:
-            compute_ram = args.virt_compute_ram
-        if deploy_settings['deploy_options']['sdn_controller'] == \
-                'opendaylight' and args.virt_default_ram < 12:
-            control_ram = 12
-            logging.warning('RAM per controller is too low.  OpenDaylight '
-                            'requires at least 12GB per controller.')
-            logging.info('Increasing RAM per controller to 12GB')
-        elif args.virt_default_ram < 10:
-            control_ram = 10
-            logging.warning('RAM per controller is too low.  nosdn '
-                            'requires at least 10GB per controller.')
-            logging.info('Increasing RAM per controller to 10GB')
-        else:
-            control_ram = args.virt_default_ram
-        if ha_enabled and args.virt_compute_nodes < 2:
-            logging.debug('HA enabled, bumping number of compute nodes to 2')
-            args.virt_compute_nodes = 2
-        virt_utils.generate_inventory(args.inventory_file, ha_enabled,
-                                      num_computes=args.virt_compute_nodes,
-                                      controller_ram=control_ram * 1024,
-                                      compute_ram=compute_ram * 1024,
-                                      vcpus=args.virt_cpus
-                                      )
-    inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
-
-    validate_cross_settings(deploy_settings, net_settings, inventory)
+        deploy_settings)))
+
+    if not args.snapshot:
+        net_settings = NetworkSettings(args.network_settings_file)
+        logging.info("Network settings are:\n {}".format(pprint.pformat(
+            net_settings)))
+        os_version = deploy_settings['deploy_options']['os_version']
+        net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
+        net_env = NetworkEnvironment(net_settings, net_env_file,
+                                     os_version=os_version)
+        net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
+        utils.dump_yaml(dict(net_env), net_env_target)
+
+        # get global deploy params
+        ha_enabled = deploy_settings['global_params']['ha_enabled']
+        introspect = deploy_settings['global_params'].get('introspect', True)
+        net_list = net_settings.enabled_network_list
+        if args.virtual:
+            if args.virt_compute_ram is None:
+                compute_ram = args.virt_default_ram
+            else:
+                compute_ram = args.virt_compute_ram
+            if (deploy_settings['deploy_options']['sdn_controller'] ==
+                    'opendaylight' and args.virt_default_ram < 12):
+                control_ram = 12
+                logging.warning('RAM per controller is too low.  OpenDaylight '
+                                'requires at least 12GB per controller.')
+                logging.info('Increasing RAM per controller to 12GB')
+            elif args.virt_default_ram < 10:
+                if platform.machine() == 'aarch64':
+                    control_ram = 16
+                    logging.warning('RAM per controller is too low for '
+                                    'aarch64 ')
+                    logging.info('Increasing RAM per controller to 16GB')
+                else:
+                    control_ram = 10
+                    logging.warning('RAM per controller is too low.  nosdn '
+                                    'requires at least 10GB per controller.')
+                    logging.info('Increasing RAM per controller to 10GB')
+            else:
+                control_ram = args.virt_default_ram
+            if platform.machine() == 'aarch64' and args.virt_cpus < 16:
+                vcpus = 16
+                logging.warning('aarch64 requires at least 16 vCPUS per '
+                                'target VM. Increasing to 16.')
+            else:
+                vcpus = args.virt_cpus
+            if ha_enabled and args.virt_compute_nodes < 2:
+                logging.debug(
+                    'HA enabled, bumping number of compute nodes to 2')
+                args.virt_compute_nodes = 2
+            virt_utils.generate_inventory(args.inventory_file, ha_enabled,
+                                          num_computes=args.virt_compute_nodes,
+                                          controller_ram=control_ram * 1024,
+                                          compute_ram=compute_ram * 1024,
+                                          vcpus=vcpus
+                                          )
+        inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
+        logging.info("Inventory is:\n {}".format(pprint.pformat(
+            inventory)))
+
+        validate_cross_settings(deploy_settings, net_settings, inventory)
+    else:
+        # only one network with snapshots
+        net_list = [constants.ADMIN_NETWORK]
+
     ds_opts = deploy_settings['deploy_options']
-    if args.quickstart:
-        deploy_settings_file = os.path.join(APEX_TEMP_DIR,
-                                            'apex_deploy_settings.yaml')
-        utils.dump_yaml(utils.dict_objects_to_str(deploy_settings),
-                        deploy_settings_file)
-        logging.info("File created: {}".format(deploy_settings_file))
-        network_settings_file = os.path.join(APEX_TEMP_DIR,
-                                             'apex_network_settings.yaml')
-        utils.dump_yaml(utils.dict_objects_to_str(net_settings),
-                        network_settings_file)
-        logging.info("File created: {}".format(network_settings_file))
-        deploy_quickstart(args, deploy_settings_file, network_settings_file,
-                          args.inventory_file)
+    ansible_args = {
+        'virsh_enabled_networks': net_list,
+        'snapshot': args.snapshot
+    }
+    utils.run_ansible(ansible_args,
+                      os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+                                   'deploy_dependencies.yml'))
+    all_in_one = not bool(args.virt_compute_nodes)
+    if args.snapshot:
+        # Start snapshot Deployment
+        logging.info('Executing Snapshot Deployment...')
+        SnapshotDeployment(deploy_settings=deploy_settings,
+                           snap_cache_dir=args.snap_cache,
+                           fetch=not args.no_fetch,
+                           all_in_one=all_in_one)
     else:
+        # Start Standard TripleO Deployment
+        deployment = ApexDeployment(deploy_settings, args.patches_file,
+                                    args.deploy_settings_file)
         # TODO (trozet): add logic back from:
         # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
-        ansible_args = {
-            'virsh_enabled_networks': net_settings.enabled_network_list
-        }
-        utils.run_ansible(ansible_args,
-                          os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
-                                       'deploy_dependencies.yml'))
         uc_external = False
         if 'external' in net_settings.enabled_network_list:
             uc_external = True
@@ -341,39 +383,53 @@ def main():
         else:
             root_pw = None
 
-        upstream = (os_version != constants.DEFAULT_OS_VERSION or
-                    args.upstream)
+        if not args.upstream:
+            logging.warning("Using upstream is now required for Apex. "
+                            "Forcing upstream to true")
         if os_version == 'master':
             branch = 'master'
         else:
             branch = "stable/{}".format(os_version)
-        if upstream:
-            logging.info("Deploying with upstream artifacts for OpenStack "
-                         "{}".format(os_version))
-            args.image_dir = os.path.join(args.image_dir, os_version)
-            upstream_url = constants.UPSTREAM_RDO.replace(
-                constants.DEFAULT_OS_VERSION, os_version)
-            upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
-            utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
-                                            upstream_targets,
-                                            fetch=not args.no_fetch)
-            sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
-            # copy undercloud so we don't taint upstream fetch
-            uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
-            uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
-            shutil.copyfile(uc_fetch_img, uc_image)
-            # prep undercloud with required packages
-            uc_builder.add_upstream_packages(uc_image)
-            # add patches from upstream to undercloud and overcloud
-            logging.info('Adding patches to undercloud')
-            patches = deploy_settings['global_params']['patches']
-            c_builder.add_upstream_patches(patches['undercloud'], uc_image,
-                                           APEX_TEMP_DIR, branch)
+
+        logging.info("Deploying with upstream artifacts for OpenStack "
+                     "{}".format(os_version))
+        args.image_dir = os.path.join(args.image_dir, os_version)
+        upstream_url = constants.UPSTREAM_RDO.replace(
+            constants.DEFAULT_OS_VERSION, os_version)
+
+        upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
+        if platform.machine() == 'aarch64':
+            upstream_targets.append('undercloud.qcow2')
+        utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
+                                        upstream_targets,
+                                        fetch=not args.no_fetch)
+        # Copy ironic files and overcloud ramdisk and kernel into temp dir
+        # to be copied by ansible into undercloud /home/stack
+        # Note the overcloud disk does not need to be copied here as it will
+        # be modified and copied later
+        for tmp_file in UC_DISK_FILES:
+            shutil.copyfile(os.path.join(args.image_dir, tmp_file),
+                            os.path.join(APEX_TEMP_DIR, tmp_file))
+        if platform.machine() == 'aarch64':
+            sdn_image = os.path.join(args.image_dir, 'undercloud.qcow2')
         else:
-            sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
-            uc_image = 'undercloud.qcow2'
-            # patches are ignored in non-upstream deployments
-            patches = {'overcloud': [], 'undercloud': []}
+            sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+        # copy undercloud so we don't taint upstream fetch
+        uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
+        uc_fetch_img = sdn_image
+        shutil.copyfile(uc_fetch_img, uc_image)
+        # prep undercloud with required packages
+        if platform.machine() != 'aarch64':
+            uc_builder.update_repos(image=uc_image,
+                                    branch=branch.replace('stable/', ''))
+        uc_builder.add_upstream_packages(uc_image)
+        uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
+        # add patches from upstream to undercloud and overcloud
+        logging.info('Adding patches to undercloud')
+        patches = deployment.determine_patches()
+        c_builder.add_upstream_patches(patches['undercloud'], uc_image,
+                                       APEX_TEMP_DIR, branch)
+
         # Create/Start Undercloud VM
         undercloud = uc_lib.Undercloud(args.image_dir,
                                        args.deploy_dir,
@@ -385,7 +441,7 @@ def main():
         undercloud_admin_ip = net_settings['networks'][
             constants.ADMIN_NETWORK]['installer_vm']['ip']
 
-        if upstream and ds_opts['containers']:
+        if ds_opts['containers']:
             tag = constants.DOCKER_TAG
         else:
             tag = None
@@ -394,6 +450,21 @@ def main():
         for role in 'compute', 'controller':
             oc_cfg.create_nic_template(net_settings, deploy_settings, role,
                                        args.deploy_dir, APEX_TEMP_DIR)
+        # Prepare/Upload docker images
+        docker_env = 'containers-prepare-parameter.yaml'
+        shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
+                        os.path.join(APEX_TEMP_DIR, docker_env))
+        # Upload extra ansible.cfg
+        if platform.machine() == 'aarch64':
+            ansible_env = 'ansible.cfg'
+            shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
+                            os.path.join(APEX_TEMP_DIR, ansible_env))
+
+        c_builder.prepare_container_images(
+            os.path.join(APEX_TEMP_DIR, docker_env),
+            branch=branch.replace('stable/', ''),
+            neutron_driver=c_builder.get_neutron_driver(ds_opts)
+        )
         # Install Undercloud
         undercloud.configure(net_settings, deploy_settings,
                              os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
@@ -408,24 +479,27 @@ def main():
                                                         net_data_file)
         else:
             net_data = False
-        if upstream and args.env_file == 'opnfv-environment.yaml':
+
+        shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
+                        os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))
+
+        # TODO(trozet): Either fix opnfv env or default to use upstream env
+        if args.env_file == 'opnfv-environment.yaml':
             # Override the env_file if it is defaulted to opnfv
             # opnfv env file will not work with upstream
             args.env_file = 'upstream-environment.yaml'
         opnfv_env = os.path.join(args.deploy_dir, args.env_file)
-        if not upstream:
-            # TODO(trozet): Invoke with containers after Fraser migration
-            oc_deploy.prep_env(deploy_settings, net_settings, inventory,
-                               opnfv_env, net_env_target, APEX_TEMP_DIR)
+        oc_deploy.prep_env(deploy_settings, net_settings, inventory,
+                           opnfv_env, net_env_target, APEX_TEMP_DIR)
+        if not args.virtual:
+            oc_deploy.LOOP_DEVICE_SIZE = "50G"
+        if platform.machine() == 'aarch64':
+            oc_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
         else:
-            shutil.copyfile(
-                opnfv_env,
-                os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env))
-            )
+            oc_image = sdn_image
         patched_containers = oc_deploy.prep_image(
-            deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
-            root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'],
-            upstream=upstream)
+            deploy_settings, net_settings, oc_image, APEX_TEMP_DIR,
+            root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
 
         oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
                                     APEX_TEMP_DIR, args.virtual,
@@ -435,10 +509,11 @@ def main():
         docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                        'prepare_overcloud_containers.yml')
         if ds_opts['containers']:
-            ceph_version = constants.CEPH_VERSION_MAP[ds_opts['os_version']]
-            ceph_docker_image = "ceph/daemon:tag-build-master-" \
-                                "{}-centos-7".format(ceph_version)
             logging.info("Preparing Undercloud with Docker containers")
+            sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
+            sdn_env_files = str()
+            for sdn_file in sdn_env:
+                sdn_env_files += " -e {}".format(sdn_file)
             if patched_containers:
                 oc_builder.archive_docker_patches(APEX_TEMP_DIR)
             container_vars = dict()
@@ -447,13 +522,13 @@ def main():
                 patched_containers)
             container_vars['container_tag'] = constants.DOCKER_TAG
             container_vars['stackrc'] = 'source /home/stack/stackrc'
-            container_vars['upstream'] = upstream
             container_vars['sdn'] = ds_opts['sdn_controller']
             container_vars['undercloud_ip'] = undercloud_admin_ip
             container_vars['os_version'] = os_version
-            container_vars['ceph_docker_image'] = ceph_docker_image
-            container_vars['sdn_env_file'] = \
-                oc_deploy.get_docker_sdn_file(ds_opts)
+            container_vars['aarch64'] = platform.machine() == 'aarch64'
+            container_vars['sdn_env_file'] = sdn_env_files
+            container_vars['container_client'] = utils.find_container_client(
+                os_version)
             try:
                 utils.run_ansible(container_vars, docker_playbook,
                                   host=undercloud.ip, user='stack',
@@ -462,6 +537,8 @@ def main():
             except Exception:
                 logging.error("Unable to complete container prep on "
                               "Undercloud")
+                for tmp_file in UC_DISK_FILES:
+                    os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
                 os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
                 raise
 
@@ -469,7 +546,8 @@ def main():
                                        'deploy_overcloud.yml')
         virt_env = 'virtual-environment.yaml'
         bm_env = 'baremetal-environment.yaml'
-        for p_env in virt_env, bm_env:
+        k8s_env = 'kubernetes-environment.yaml'
+        for p_env in virt_env, bm_env, k8s_env:
             shutil.copyfile(os.path.join(args.deploy_dir, p_env),
                             os.path.join(APEX_TEMP_DIR, p_env))
 
@@ -487,12 +565,14 @@ def main():
         deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
         deploy_vars['stackrc'] = 'source /home/stack/stackrc'
         deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
-        deploy_vars['upstream'] = upstream
         deploy_vars['undercloud_ip'] = undercloud_admin_ip
         deploy_vars['ha_enabled'] = ha_enabled
         deploy_vars['os_version'] = os_version
         deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
         deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
+        deploy_vars['vim'] = ds_opts['vim']
+        deploy_vars['container_client'] = utils.find_container_client(
+            os_version)
         for dns_server in net_settings['dns_servers']:
             deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                 dns_server)
@@ -508,6 +588,8 @@ def main():
             raise
         finally:
             os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+            for tmp_file in UC_DISK_FILES:
+                os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
 
         # Post install
         logging.info("Executing post deploy configuration")
@@ -546,17 +628,107 @@ def main():
         else:
             deploy_vars['overcloudrc_files'] = ['overcloudrc']
 
-        post_undercloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+        post_undercloud = os.path.join(args.lib_dir,
+                                       constants.ANSIBLE_PATH,
                                        'post_deploy_undercloud.yml')
-        logging.info("Executing post deploy configuration undercloud playbook")
+        logging.info("Executing post deploy configuration undercloud "
+                     "playbook")
         try:
-            utils.run_ansible(deploy_vars, post_undercloud, host=undercloud.ip,
-                              user='stack', tmp_dir=APEX_TEMP_DIR)
+            utils.run_ansible(deploy_vars, post_undercloud,
+                              host=undercloud.ip, user='stack',
+                              tmp_dir=APEX_TEMP_DIR)
             logging.info("Post Deploy Undercloud Configuration Complete")
         except Exception:
             logging.error("Post Deploy Undercloud Configuration failed.  "
                           "Please check log")
             raise
+
+        # Deploy kubernetes if enabled
+        # (TODO)zshi move handling of kubernetes deployment
+        # to its own deployment class
+        if deploy_vars['vim'] == 'k8s':
+            # clone kubespray repo
+            git.Repo.clone_from(constants.KUBESPRAY_URL,
+                                os.path.join(APEX_TEMP_DIR, 'kubespray'))
+            shutil.copytree(
+                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
+                             'sample'),
+                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
+                             'apex'))
+            k8s_node_inventory = {
+                'all':
+                    {'hosts': {},
+                     'children': {
+                         'k8s-cluster': {
+                             'children': {
+                                 'kube-master': {
+                                     'hosts': {}
+                                 },
+                                 'kube-node': {
+                                     'hosts': {}
+                                 }
+                             }
+                         },
+                         'etcd': {
+                             'hosts': {}
+                         }
+                    }
+                    }
+            }
+            for node, ip in deploy_vars['overcloud_nodes'].items():
+                k8s_node_inventory['all']['hosts'][node] = {
+                    'ansible_become': True,
+                    'ansible_ssh_host': ip,
+                    'ansible_become_user': 'root',
+                    'ip': ip
+                }
+                if 'controller' in node:
+                    k8s_node_inventory['all']['children']['k8s-cluster'][
+                        'children']['kube-master']['hosts'][node] = None
+                    k8s_node_inventory['all']['children']['etcd'][
+                        'hosts'][node] = None
+                elif 'compute' in node:
+                    k8s_node_inventory['all']['children']['k8s-cluster'][
+                        'children']['kube-node']['hosts'][node] = None
+
+            kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
+            with open(os.path.join(kubespray_dir, 'inventory', 'apex',
+                                   'apex.yaml'), 'w') as invfile:
+                yaml.dump(k8s_node_inventory, invfile,
+                          default_flow_style=False)
+            k8s_deploy_vars = {}
+            # Add kubespray ansible control variables in k8s_deploy_vars,
+            # example: 'kube_network_plugin': 'flannel'
+            k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
+            k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
+                                               'apex', 'apex.yaml')
+
+            k8s_remove_pkgs = os.path.join(args.lib_dir,
+                                           constants.ANSIBLE_PATH,
+                                           'k8s_remove_pkgs.yml')
+            try:
+                logging.debug("Removing any existing overcloud docker "
+                              "packages")
+                utils.run_ansible(k8s_deploy_vars, k8s_remove_pkgs,
+                                  host=k8s_deploy_inv_file,
+                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+                logging.info("k8s Deploy Remove Existing Docker Related "
+                             "Packages Complete")
+            except Exception:
+                logging.error("k8s Deploy Remove Existing Docker Related "
+                              "Packages failed. Please check log")
+                raise
+
+            try:
+                utils.run_ansible(k8s_deploy_vars, k8s_deploy,
+                                  host=k8s_deploy_inv_file,
+                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+                logging.info("k8s Deploy Overcloud Configuration Complete")
+            except Exception:
+                logging.error("k8s Deploy Overcloud Configuration failed."
+                              "Please check log")
+                raise
+
         # Post deploy overcloud node configuration
         # TODO(trozet): just parse all ds_opts as deploy vars one time
         deploy_vars['sfc'] = ds_opts['sfc']
@@ -564,6 +736,10 @@ def main():
         deploy_vars['l2gw'] = ds_opts.get('l2gw')
         deploy_vars['sriov'] = ds_opts.get('sriov')
         deploy_vars['tacker'] = ds_opts.get('tacker')
+        deploy_vars['all_in_one'] = all_in_one
+        # TODO(trozet): need to set container client to docker until OOO
+        # migrates OC to podman. Remove this later.
+        deploy_vars['container_client'] = 'docker'
         # TODO(trozet): pull all logs and store in tmp dir in overcloud
         # playbook
         post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,