import json
import logging
import os
+import platform
import pprint
import shutil
import sys
import tempfile
import apex.virtual.configure_vm as vm_lib
-import apex.virtual.virtual_utils as virt_utils
+import apex.virtual.utils as virt_utils
from apex import DeploySettings
from apex import Inventory
from apex import NetworkEnvironment
from apex.overcloud import config as oc_cfg
from apex.overcloud import overcloud_deploy
-APEX_TEMP_DIR = tempfile.mkdtemp()
+APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
ANSIBLE_PATH = 'ansible/playbooks'
SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
raise ApexDeployException("Setting a DPDK based dataplane requires"
"a dedicated NIC for tenant network")
+ if 'odl_vpp_routing_node' in deploy_settings['deploy_options']:
+ if deploy_settings['deploy_options']['dataplane'] != 'fdio':
+ raise ApexDeployException("odl_vpp_routing_node should only be set"
+                                      " when dataplane is set to fdio")
+ if deploy_settings['deploy_options'].get('dvr') is True:
+ raise ApexDeployException("odl_vpp_routing_node should only be set"
+                                      " when dvr is not enabled")
+
# TODO(trozet): add more checks here like RAM for ODL, etc
# check if odl_vpp_netvirt is true and vpp is set
# Check if fdio and nosdn:
# "$tenant_nic_mapping_compute_members
-def build_vms(inventory, network_settings):
+def build_vms(inventory, network_settings,
+ template_dir='/usr/share/opnfv-apex'):
"""
Creates VMs and configures vbmc and host
:param inventory:
name = 'baremetal{}'.format(idx)
volume = name + ".qcow2"
volume_path = os.path.join(constants.LIBVIRT_VOLUME_PATH, volume)
- # TODO(trozet): add back aarch64
# TODO(trozet): add error checking
vm_lib.create_vm(
name, volume_path,
baremetal_interfaces=network_settings.enabled_network_list,
memory=node['memory'], cpus=node['cpu'],
- macs=[node['mac_address']])
+ macs=node['mac'],
+ template_dir=template_dir)
virt_utils.host_setup({name: node['pm_port']})
ansible_args = {
'virsh_enabled_networks': net_settings.enabled_network_list
}
- ansible_path = os.path.join(args.lib_dir, ANSIBLE_PATH)
utils.run_ansible(ansible_args,
- os.path.join(args.lib_dir,
- ansible_path,
+ os.path.join(args.lib_dir, ANSIBLE_PATH,
'deploy_dependencies.yml'))
uc_external = False
if 'external' in net_settings.enabled_network_list:
uc_external = True
if args.virtual:
# create all overcloud VMs
- build_vms(inventory, net_settings)
+ build_vms(inventory, net_settings, args.deploy_dir)
else:
# Attach interfaces to jumphost for baremetal deployment
jump_networks = ['admin']
else:
root_pw = None
undercloud = uc_lib.Undercloud(args.image_dir,
+ args.deploy_dir,
root_pw=root_pw,
external_network=uc_external)
undercloud.start()
args.deploy_dir, APEX_TEMP_DIR)
# Install Undercloud
undercloud.configure(net_settings,
- os.path.join(args.lib_dir,
- ansible_path,
+ os.path.join(args.lib_dir, ANSIBLE_PATH,
'configure_undercloud.yml'),
APEX_TEMP_DIR)
overcloud_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
root_pw=root_pw)
opnfv_env = os.path.join(args.deploy_dir, args.env_file)
- overcloud_deploy.prep_env(deploy_settings, net_settings, opnfv_env,
- net_env_target, APEX_TEMP_DIR)
+ overcloud_deploy.prep_env(deploy_settings, net_settings, inventory,
+ opnfv_env, net_env_target, APEX_TEMP_DIR)
overcloud_deploy.create_deploy_cmd(deploy_settings, net_settings,
inventory, APEX_TEMP_DIR,
args.virtual, args.env_file)
- deploy_playbook = os.path.join(args.lib_dir, ansible_path,
+ deploy_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH,
'deploy_overcloud.yml')
virt_env = 'virtual-environment.yaml'
bm_env = 'baremetal-environment.yaml'
deploy_vars = dict()
deploy_vars['virtual'] = args.virtual
deploy_vars['debug'] = args.debug
+ deploy_vars['aarch64'] = platform.machine() == 'aarch64'
deploy_vars['dns_server_args'] = ''
deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
deploy_vars['stackrc'] = 'source /home/stack/stackrc'
utils.run_ansible(deploy_vars, deploy_playbook, host=undercloud.ip,
user='stack', tmp_dir=APEX_TEMP_DIR)
logging.info("Overcloud deployment complete")
- os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
except Exception:
logging.error("Deployment Failed. Please check log")
raise
+ finally:
+ os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
# Post install
logging.info("Executing post deploy configuration")
deploy_vars['congress'] = True
else:
deploy_vars['congress'] = False
+ deploy_vars['calipso'] = ds_opts.get('calipso', False)
+ deploy_vars['calipso_ip'] = net_settings['networks']['admin'][
+ 'installer_vm']['ip']
# TODO(trozet): this is probably redundant with getting external
# network info from undercloud.py
if 'external' in net_settings.enabled_network_list:
deploy_vars['external_network_ipv6'] = True
else:
deploy_vars['external_network_ipv6'] = False
- post_undercloud = os.path.join(args.lib_dir, ansible_path,
+ post_undercloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
'post_deploy_undercloud.yml')
logging.info("Executing post deploy configuration undercloud playbook")
try:
deploy_vars['vpn'] = ds_opts['vpn']
# TODO(trozet): pull all logs and store in tmp dir in overcloud
# playbook
- post_overcloud = os.path.join(args.lib_dir, ansible_path,
+ post_overcloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
'post_deploy_overcloud.yml')
# Run per overcloud node
for node, ip in deploy_vars['overcloud_nodes'].items():