X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=apex%2Fdeploy.py;h=b74d52927e95db0dadafc7191369d98cca1b896b;hb=1817e62a1a79061fbf397b2a8dfda8fdbf0d419b;hp=65f00e0481eaf8e95157c81f407dd4183dc74c54;hpb=abc17537bc6fd0337e4673fd2bc1fc5c46588097;p=apex.git

diff --git a/apex/deploy.py b/apex/deploy.py
index 65f00e04..b74d5292 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -30,10 +30,12 @@ from apex import DeploySettings
 from apex import Inventory
 from apex import NetworkEnvironment
 from apex import NetworkSettings
+from apex.deployment.snapshot import SnapshotDeployment
 from apex.common import utils
 from apex.common import constants
 from apex.common import parsers
 from apex.common.exceptions import ApexDeployException
+from apex.deployment.tripleo import ApexDeployment
 from apex.network import jumphost
 from apex.network import network_data
 from apex.undercloud import undercloud as uc_lib
@@ -42,11 +44,12 @@ from apex.overcloud import deploy as oc_deploy
 
 APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
 SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
-
-
-def deploy_quickstart(args, deploy_settings_file, network_settings_file,
-                      inventory_file=None):
-    pass
+UC_DISK_FILES = [
+    'overcloud-full.vmlinuz',
+    'overcloud-full.initrd',
+    'ironic-python-agent.initramfs',
+    'ironic-python-agent.kernel'
+]
 
 
 def validate_cross_settings(deploy_settings, net_settings, inventory):
@@ -114,7 +117,7 @@ def create_deploy_parser():
                                help='File which contains Apex deploy settings')
     deploy_parser.add_argument('-n', '--network-settings',
                                dest='network_settings_file',
-                               required=True,
+                               required=False,
                                help='File which contains Apex network '
                                     'settings')
     deploy_parser.add_argument('-i', '--inventory-file',
@@ -175,9 +178,14 @@ def create_deploy_parser():
                                default='/usr/share/opnfv-apex',
                                help='Directory path for apex ansible '
                                     'and third party libs')
-    deploy_parser.add_argument('--quickstart', action='store_true',
+    deploy_parser.add_argument('-s', '--snapshot', action='store_true',
                                default=False,
-                               help='Use tripleo-quickstart to deploy')
+                               help='Use snapshots for deployment')
+    deploy_parser.add_argument('--snap-cache', dest='snap_cache',
+                               default="{}/snap_cache".format(
+                                   os.path.expanduser('~')),
+                               help='Local directory to cache snapshot '
+                                    'artifacts. Defaults to $HOME/snap_cache')
     deploy_parser.add_argument('--upstream', action='store_true',
                                default=True,
                                help='Force deployment to use upstream '
@@ -188,6 +196,11 @@ def create_deploy_parser():
                                default=False,
                                help='Ignore fetching latest upstream and '
                                     'use what is in cache')
+    deploy_parser.add_argument('-p', '--patches',
+                               default='/etc/opnfv-apex/common-patches.yaml',
+                               dest='patches_file',
+                               help='File to include for common patches '
+                                    'which apply to all deployment scenarios')
     return deploy_parser
 
 
@@ -199,20 +212,25 @@ def validate_deploy_args(args):
     """
 
     logging.debug('Validating arguments for deployment')
-    if args.virtual and args.inventory_file is not None:
+    if args.snapshot:
+        logging.debug('Skipping inventory validation as it is not applicable'
+                      'to snapshot deployments')
+    elif args.virtual and args.inventory_file is not None:
         logging.error("Virtual enabled but inventory file also given")
         raise ApexDeployException('You should not specify an inventory file '
                                   'with virtual deployments')
     elif args.virtual:
         args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                            'inventory-virt.yaml')
-    elif os.path.isfile(args.inventory_file) is False:
+    elif not os.path.isfile(args.inventory_file):
         logging.error("Specified inventory file does not exist: {}".format(
             args.inventory_file))
         raise ApexDeployException('Specified inventory file does not exist')
 
     for settings_file in (args.deploy_settings_file,
                           args.network_settings_file):
+        if settings_file == args.network_settings_file and args.snapshot:
+            continue
         if os.path.isfile(settings_file) is False:
             logging.error("Specified settings file does not "
                           "exist: {}".format(settings_file))
@@ -247,73 +265,80 @@ def main():
     deploy_settings = DeploySettings(args.deploy_settings_file)
     logging.info("Deploy settings are:\n {}".format(pprint.pformat(
         deploy_settings)))
-    net_settings = NetworkSettings(args.network_settings_file)
-    logging.info("Network settings are:\n {}".format(pprint.pformat(
-        net_settings)))
-    os_version = deploy_settings['deploy_options']['os_version']
-    net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
-    net_env = NetworkEnvironment(net_settings, net_env_file,
-                                 os_version=os_version)
-    net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
-    utils.dump_yaml(dict(net_env), net_env_target)
-
-    # get global deploy params
-    ha_enabled = deploy_settings['global_params']['ha_enabled']
-    introspect = deploy_settings['global_params'].get('introspect', True)
-
-    if args.virtual:
-        if args.virt_compute_ram is None:
-            compute_ram = args.virt_default_ram
-        else:
-            compute_ram = args.virt_compute_ram
-        if deploy_settings['deploy_options']['sdn_controller'] == \
-                'opendaylight' and args.virt_default_ram < 12:
-            control_ram = 12
-            logging.warning('RAM per controller is too low. OpenDaylight '
-                            'requires at least 12GB per controller.')
-            logging.info('Increasing RAM per controller to 12GB')
-        elif args.virt_default_ram < 10:
-            control_ram = 10
-            logging.warning('RAM per controller is too low. nosdn '
-                            'requires at least 10GB per controller.')
-            logging.info('Increasing RAM per controller to 10GB')
-        else:
-            control_ram = args.virt_default_ram
-        if ha_enabled and args.virt_compute_nodes < 2:
-            logging.debug('HA enabled, bumping number of compute nodes to 2')
-            args.virt_compute_nodes = 2
-        virt_utils.generate_inventory(args.inventory_file, ha_enabled,
-                                      num_computes=args.virt_compute_nodes,
-                                      controller_ram=control_ram * 1024,
-                                      compute_ram=compute_ram * 1024,
-                                      vcpus=args.virt_cpus
-                                      )
-    inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
-
-    validate_cross_settings(deploy_settings, net_settings, inventory)
+
+    if not args.snapshot:
+        net_settings = NetworkSettings(args.network_settings_file)
+        logging.info("Network settings are:\n {}".format(pprint.pformat(
+            net_settings)))
+        os_version = deploy_settings['deploy_options']['os_version']
+        net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
+        net_env = NetworkEnvironment(net_settings, net_env_file,
+                                     os_version=os_version)
+        net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
+        utils.dump_yaml(dict(net_env), net_env_target)
+
+        # get global deploy params
+        ha_enabled = deploy_settings['global_params']['ha_enabled']
+        introspect = deploy_settings['global_params'].get('introspect', True)
+        net_list = net_settings.enabled_network_list
+        if args.virtual:
+            if args.virt_compute_ram is None:
+                compute_ram = args.virt_default_ram
+            else:
+                compute_ram = args.virt_compute_ram
+            if (deploy_settings['deploy_options']['sdn_controller'] ==
+                    'opendaylight' and args.virt_default_ram < 12):
+                control_ram = 12
+                logging.warning('RAM per controller is too low. OpenDaylight '
+                                'requires at least 12GB per controller.')
+                logging.info('Increasing RAM per controller to 12GB')
+            elif args.virt_default_ram < 10:
+                control_ram = 10
+                logging.warning('RAM per controller is too low. nosdn '
+                                'requires at least 10GB per controller.')
+                logging.info('Increasing RAM per controller to 10GB')
+            else:
+                control_ram = args.virt_default_ram
+            if ha_enabled and args.virt_compute_nodes < 2:
+                logging.debug(
+                    'HA enabled, bumping number of compute nodes to 2')
+                args.virt_compute_nodes = 2
+            virt_utils.generate_inventory(args.inventory_file, ha_enabled,
+                                          num_computes=args.virt_compute_nodes,
+                                          controller_ram=control_ram * 1024,
+                                          compute_ram=compute_ram * 1024,
+                                          vcpus=args.virt_cpus
+                                          )
+        inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
+        logging.info("Inventory is:\n {}".format(pprint.pformat(
+            inventory)))
+
+        validate_cross_settings(deploy_settings, net_settings, inventory)
+    else:
+        # only one network with snapshots
+        net_list = [constants.ADMIN_NETWORK]
+
     ds_opts = deploy_settings['deploy_options']
-    if args.quickstart:
-        deploy_settings_file = os.path.join(APEX_TEMP_DIR,
-                                            'apex_deploy_settings.yaml')
-        utils.dump_yaml(utils.dict_objects_to_str(deploy_settings),
-                        deploy_settings_file)
-        logging.info("File created: {}".format(deploy_settings_file))
-        network_settings_file = os.path.join(APEX_TEMP_DIR,
-                                             'apex_network_settings.yaml')
-        utils.dump_yaml(utils.dict_objects_to_str(net_settings),
-                        network_settings_file)
-        logging.info("File created: {}".format(network_settings_file))
-        deploy_quickstart(args, deploy_settings_file, network_settings_file,
-                          args.inventory_file)
+    ansible_args = {
+        'virsh_enabled_networks': net_list,
+        'snapshot': args.snapshot
+    }
+    utils.run_ansible(ansible_args,
+                      os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+                                   'deploy_dependencies.yml'))
+    if args.snapshot:
+        # Start snapshot Deployment
+        logging.info('Executing Snapshot Deployment...')
+        SnapshotDeployment(deploy_settings=deploy_settings,
+                           snap_cache_dir=args.snap_cache,
+                           fetch=not args.no_fetch,
+                           all_in_one=not bool(args.virt_compute_nodes))
     else:
+        # Start Standard TripleO Deployment
+        deployment = ApexDeployment(deploy_settings, args.patches_file,
+                                    args.deploy_settings_file)
         # TODO (trozet): add logic back from:
         # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
-        ansible_args = {
-            'virsh_enabled_networks': net_settings.enabled_network_list
-        }
-        utils.run_ansible(ansible_args,
-                          os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
-                                       'deploy_dependencies.yml'))
         uc_external = False
         if 'external' in net_settings.enabled_network_list:
             uc_external = True
@@ -358,20 +383,31 @@ def main():
         args.image_dir = os.path.join(args.image_dir, os_version)
         upstream_url = constants.UPSTREAM_RDO.replace(
             constants.DEFAULT_OS_VERSION, os_version)
-        upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
+        upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
         utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
                                         upstream_targets,
                                         fetch=not args.no_fetch)
+        # Copy ironic files and overcloud ramdisk and kernel into temp dir
+        # to be copied by ansible into undercloud /home/stack
+        # Note the overcloud disk does not need to be copied here as it will
+        # be modified and copied later
+        for tmp_file in UC_DISK_FILES:
+            shutil.copyfile(os.path.join(args.image_dir, tmp_file),
+                            os.path.join(APEX_TEMP_DIR, tmp_file))
         sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
         # copy undercloud so we don't taint upstream fetch
         uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
-        uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
+        uc_fetch_img = sdn_image
         shutil.copyfile(uc_fetch_img, uc_image)
         # prep undercloud with required packages
+        if platform.machine() != 'aarch64':
+            uc_builder.update_repos(image=uc_image,
+                                    branch=branch.replace('stable/', ''))
        uc_builder.add_upstream_packages(uc_image)
+        uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
         # add patches from upstream to undercloud and overcloud
         logging.info('Adding patches to undercloud')
-        patches = deploy_settings['global_params']['patches']
+        patches = deployment.determine_patches()
         c_builder.add_upstream_patches(patches['undercloud'], uc_image,
                                        APEX_TEMP_DIR, branch)
 
@@ -395,6 +431,15 @@ def main():
         for role in 'compute', 'controller':
             oc_cfg.create_nic_template(net_settings, deploy_settings, role,
                                        args.deploy_dir, APEX_TEMP_DIR)
+        # Prepare/Upload docker images
+        docker_env = 'containers-prepare-parameter.yaml'
+        shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
+                        os.path.join(APEX_TEMP_DIR, docker_env))
+        c_builder.prepare_container_images(
+            os.path.join(APEX_TEMP_DIR, docker_env),
+            branch=branch.replace('stable/', ''),
+            neutron_driver=c_builder.get_neutron_driver(ds_opts)
+        )
         # Install Undercloud
         undercloud.configure(net_settings, deploy_settings,
                              os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
@@ -410,6 +455,9 @@ def main():
         else:
             net_data = False
 
+        shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
+                        os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))
+
         # TODO(trozet): Either fix opnfv env or default to use upstream env
         if args.env_file == 'opnfv-environment.yaml':
             # Override the env_file if it is defaulted to opnfv
@@ -433,6 +481,10 @@ def main():
                                        'prepare_overcloud_containers.yml')
         if ds_opts['containers']:
             logging.info("Preparing Undercloud with Docker containers")
+            sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
+            sdn_env_files = str()
+            for sdn_file in sdn_env:
+                sdn_env_files += " -e {}".format(sdn_file)
             if patched_containers:
                 oc_builder.archive_docker_patches(APEX_TEMP_DIR)
             container_vars = dict()
@@ -445,8 +497,7 @@ def main():
             container_vars['undercloud_ip'] = undercloud_admin_ip
             container_vars['os_version'] = os_version
             container_vars['aarch64'] = platform.machine() == 'aarch64'
-            container_vars['sdn_env_file'] = \
-                oc_deploy.get_docker_sdn_file(ds_opts)
+            container_vars['sdn_env_file'] = sdn_env_files
             try:
                 utils.run_ansible(container_vars, docker_playbook,
                                   host=undercloud.ip, user='stack',
@@ -455,6 +506,8 @@ def main():
             except Exception:
                 logging.error("Unable to complete container prep on "
                               "Undercloud")
+                for tmp_file in UC_DISK_FILES:
+                    os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
                 os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
                 raise
 
@@ -502,6 +555,8 @@ def main():
             raise
         finally:
             os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+            for tmp_file in UC_DISK_FILES:
+                os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
 
         # Post install
         logging.info("Executing post deploy configuration")