##############################################################################
import argparse
+import git
import json
import logging
import os
import shutil
import sys
import tempfile
+import yaml
import apex.virtual.configure_vm as vm_lib
import apex.virtual.utils as virt_utils
default=False,
help='Use tripleo-quickstart to deploy')
deploy_parser.add_argument('--upstream', action='store_true',
- default=False,
+ default=True,
help='Force deployment to use upstream '
- 'artifacts')
+ 'artifacts. This option is now '
+ 'deprecated and only upstream '
+ 'deployments are supported.')
deploy_parser.add_argument('--no-fetch', action='store_true',
default=False,
help='Ignore fetching latest upstream and '
# Parse all settings
deploy_settings = DeploySettings(args.deploy_settings_file)
logging.info("Deploy settings are:\n {}".format(pprint.pformat(
- deploy_settings)))
+ deploy_settings)))
net_settings = NetworkSettings(args.network_settings_file)
logging.info("Network settings are:\n {}".format(pprint.pformat(
- net_settings)))
+ net_settings)))
os_version = deploy_settings['deploy_options']['os_version']
net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
net_env = NetworkEnvironment(net_settings, net_env_file,
else:
root_pw = None
- upstream = (os_version != constants.DEFAULT_OS_VERSION or
- args.upstream)
+ if not args.upstream:
+ logging.warning("Using upstream is now required for Apex. "
+ "Forcing upstream to true")
if os_version == 'master':
branch = 'master'
else:
branch = "stable/{}".format(os_version)
- if upstream:
- logging.info("Deploying with upstream artifacts for OpenStack "
- "{}".format(os_version))
- args.image_dir = os.path.join(args.image_dir, os_version)
- upstream_url = constants.UPSTREAM_RDO.replace(
- constants.DEFAULT_OS_VERSION, os_version)
- upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
- utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
- upstream_targets,
- fetch=not args.no_fetch)
- sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
- # copy undercloud so we don't taint upstream fetch
- uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
- uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
- shutil.copyfile(uc_fetch_img, uc_image)
- # prep undercloud with required packages
- uc_builder.add_upstream_packages(uc_image)
- # add patches from upstream to undercloud and overcloud
- logging.info('Adding patches to undercloud')
- patches = deploy_settings['global_params']['patches']
- c_builder.add_upstream_patches(patches['undercloud'], uc_image,
- APEX_TEMP_DIR, branch)
- else:
- sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
- uc_image = 'undercloud.qcow2'
- # patches are ignored in non-upstream deployments
- patches = {'overcloud': [], 'undercloud': []}
+
+ logging.info("Deploying with upstream artifacts for OpenStack "
+ "{}".format(os_version))
+ args.image_dir = os.path.join(args.image_dir, os_version)
+ upstream_url = constants.UPSTREAM_RDO.replace(
+ constants.DEFAULT_OS_VERSION, os_version)
+ upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
+ utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
+ upstream_targets,
+ fetch=not args.no_fetch)
+ sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+ # copy undercloud so we don't taint upstream fetch
+ uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
+ uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
+ shutil.copyfile(uc_fetch_img, uc_image)
+ # prep undercloud with required packages
+ uc_builder.add_upstream_packages(uc_image)
+ # add patches from upstream to undercloud and overcloud
+ logging.info('Adding patches to undercloud')
+ patches = deploy_settings['global_params']['patches']
+ c_builder.add_upstream_patches(patches['undercloud'], uc_image,
+ APEX_TEMP_DIR, branch)
+
# Create/Start Undercloud VM
undercloud = uc_lib.Undercloud(args.image_dir,
args.deploy_dir,
undercloud_admin_ip = net_settings['networks'][
constants.ADMIN_NETWORK]['installer_vm']['ip']
- if upstream and ds_opts['containers']:
+ if ds_opts['containers']:
tag = constants.DOCKER_TAG
else:
tag = None
net_data_file)
else:
net_data = False
- if upstream and args.env_file == 'opnfv-environment.yaml':
+
+ # TODO(trozet): Either fix opnfv env or default to use upstream env
+ if args.env_file == 'opnfv-environment.yaml':
# Override the env_file if it is defaulted to opnfv
# opnfv env file will not work with upstream
args.env_file = 'upstream-environment.yaml'
opnfv_env = os.path.join(args.deploy_dir, args.env_file)
- if not upstream:
- # TODO(trozet): Invoke with containers after Fraser migration
- oc_deploy.prep_env(deploy_settings, net_settings, inventory,
- opnfv_env, net_env_target, APEX_TEMP_DIR)
- else:
- shutil.copyfile(
- opnfv_env,
- os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env))
- )
+ oc_deploy.prep_env(deploy_settings, net_settings, inventory,
+ opnfv_env, net_env_target, APEX_TEMP_DIR)
+ if not args.virtual:
+ oc_deploy.LOOP_DEVICE_SIZE = "50G"
patched_containers = oc_deploy.prep_image(
deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
- root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'],
- upstream=upstream)
+ root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
APEX_TEMP_DIR, args.virtual,
docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'prepare_overcloud_containers.yml')
if ds_opts['containers']:
- ceph_version = constants.CEPH_VERSION_MAP[ds_opts['os_version']]
- ceph_docker_image = "ceph/daemon:tag-build-master-" \
- "{}-centos-7".format(ceph_version)
logging.info("Preparing Undercloud with Docker containers")
if patched_containers:
oc_builder.archive_docker_patches(APEX_TEMP_DIR)
patched_containers)
container_vars['container_tag'] = constants.DOCKER_TAG
container_vars['stackrc'] = 'source /home/stack/stackrc'
- container_vars['upstream'] = upstream
container_vars['sdn'] = ds_opts['sdn_controller']
container_vars['undercloud_ip'] = undercloud_admin_ip
container_vars['os_version'] = os_version
- container_vars['ceph_docker_image'] = ceph_docker_image
+ container_vars['aarch64'] = platform.machine() == 'aarch64'
container_vars['sdn_env_file'] = \
oc_deploy.get_docker_sdn_file(ds_opts)
try:
'deploy_overcloud.yml')
virt_env = 'virtual-environment.yaml'
bm_env = 'baremetal-environment.yaml'
- for p_env in virt_env, bm_env:
+ k8s_env = 'kubernetes-environment.yaml'
+ for p_env in virt_env, bm_env, k8s_env:
shutil.copyfile(os.path.join(args.deploy_dir, p_env),
os.path.join(APEX_TEMP_DIR, p_env))
deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
deploy_vars['stackrc'] = 'source /home/stack/stackrc'
deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
- deploy_vars['upstream'] = upstream
+ deploy_vars['undercloud_ip'] = undercloud_admin_ip
+ deploy_vars['ha_enabled'] = ha_enabled
deploy_vars['os_version'] = os_version
deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
+ deploy_vars['vim'] = ds_opts['vim']
for dns_server in net_settings['dns_servers']:
deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
dns_server)
user='stack', tmp_dir=APEX_TEMP_DIR)
logging.info("Overcloud deployment complete")
except Exception:
- logging.error("Deployment Failed. Please check log")
+ logging.error("Deployment Failed. Please check deploy log as "
+ "well as mistral logs in "
+ "{}".format(os.path.join(APEX_TEMP_DIR,
+ 'mistral_logs.tar.gz')))
raise
finally:
os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
else:
deploy_vars['overcloudrc_files'] = ['overcloudrc']
- post_undercloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+ post_undercloud = os.path.join(args.lib_dir,
+ constants.ANSIBLE_PATH,
'post_deploy_undercloud.yml')
- logging.info("Executing post deploy configuration undercloud playbook")
+ logging.info("Executing post deploy configuration undercloud "
+ "playbook")
try:
- utils.run_ansible(deploy_vars, post_undercloud, host=undercloud.ip,
- user='stack', tmp_dir=APEX_TEMP_DIR)
+ utils.run_ansible(deploy_vars, post_undercloud,
+ host=undercloud.ip, user='stack',
+ tmp_dir=APEX_TEMP_DIR)
logging.info("Post Deploy Undercloud Configuration Complete")
except Exception:
logging.error("Post Deploy Undercloud Configuration failed. "
"Please check log")
raise
+
+ # Deploy kubernetes if enabled
+    # TODO(zshi): move handling of kubernetes deployment
+    # to its own deployment class
+ if deploy_vars['vim'] == 'k8s':
+ # clone kubespray repo
+ git.Repo.clone_from(constants.KUBESPRAY_URL,
+ os.path.join(APEX_TEMP_DIR, 'kubespray'))
+ shutil.copytree(
+ os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
+ 'sample'),
+ os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
+ 'apex'))
+ k8s_node_inventory = {
+ 'all':
+ {'hosts': {},
+ 'children': {
+ 'k8s-cluster': {
+ 'children': {
+ 'kube-master': {
+ 'hosts': {}
+ },
+ 'kube-node': {
+ 'hosts': {}
+ }
+ }
+ },
+ 'etcd': {
+ 'hosts': {}
+ }
+ }
+ }
+ }
+ for node, ip in deploy_vars['overcloud_nodes'].items():
+ k8s_node_inventory['all']['hosts'][node] = {
+ 'ansible_become': True,
+ 'ansible_ssh_host': ip,
+ 'ansible_become_user': 'root',
+ 'ip': ip
+ }
+ if 'controller' in node:
+ k8s_node_inventory['all']['children']['k8s-cluster'][
+ 'children']['kube-master']['hosts'][node] = None
+ k8s_node_inventory['all']['children']['etcd'][
+ 'hosts'][node] = None
+ elif 'compute' in node:
+ k8s_node_inventory['all']['children']['k8s-cluster'][
+ 'children']['kube-node']['hosts'][node] = None
+
+ kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
+ with open(os.path.join(kubespray_dir, 'inventory', 'apex',
+ 'apex.yaml'), 'w') as invfile:
+ yaml.dump(k8s_node_inventory, invfile,
+ default_flow_style=False)
+ k8s_deploy_vars = {}
+            # Add kubespray ansible control variables to k8s_deploy_vars,
+            # for example: 'kube_network_plugin': 'flannel'
+ k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
+ k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
+ 'apex', 'apex.yaml')
+
+ k8s_remove_pkgs = os.path.join(args.lib_dir,
+ constants.ANSIBLE_PATH,
+ 'k8s_remove_pkgs.yml')
+ try:
+ logging.debug("Removing any existing overcloud docker "
+ "packages")
+ utils.run_ansible(k8s_deploy_vars, k8s_remove_pkgs,
+ host=k8s_deploy_inv_file,
+ user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+ logging.info("k8s Deploy Remove Existing Docker Related "
+ "Packages Complete")
+ except Exception:
+ logging.error("k8s Deploy Remove Existing Docker Related "
+ "Packages failed. Please check log")
+ raise
+
+ try:
+ utils.run_ansible(k8s_deploy_vars, k8s_deploy,
+ host=k8s_deploy_inv_file,
+ user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+ logging.info("k8s Deploy Overcloud Configuration Complete")
+ except Exception:
+ logging.error("k8s Deploy Overcloud Configuration failed."
+ "Please check log")
+ raise
+
# Post deploy overcloud node configuration
# TODO(trozet): just parse all ds_opts as deploy vars one time
deploy_vars['sfc'] = ds_opts['sfc']
deploy_vars['vpn'] = ds_opts['vpn']
deploy_vars['l2gw'] = ds_opts.get('l2gw')
deploy_vars['sriov'] = ds_opts.get('sriov')
+ deploy_vars['tacker'] = ds_opts.get('tacker')
# TODO(trozet): pull all logs and store in tmp dir in overcloud
# playbook
post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,