diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index d1099c41..41b2d56b 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -12,14 +12,19 @@ import fileinput
 import logging
 import os
 import platform
+import pprint
 import shutil
 import uuid
 import struct
 import time
+import yaml
 
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.common_builder as c_builder
 from apex.common import constants as con
 from apex.common.exceptions import ApexDeployException
 from apex.common import parsers
+from apex.common import utils
 from apex.virtual import utils as virt_utils
 from cryptography.hazmat.primitives import serialization as \
     crypto_serialization
@@ -38,6 +43,8 @@ SDN_FILE_MAP = {
             'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
             'default': 'neutron-opendaylight-honeycomb.yaml'
         },
+        'l2gw': 'neutron-l2gw-opendaylight.yaml',
+        'sriov': 'neutron-opendaylight-sriov.yaml',
         'default': 'neutron-opendaylight.yaml',
     },
     'onos': {
@@ -65,11 +72,33 @@ OVS_PERF_MAP = {
     'NeutronDpdkMemoryChannels': 'memory_channels'
 }
 
-OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
-OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                       ".noarch.rpm"
 
+LOOP_DEVICE_SIZE = "10G"
+
+LOSETUP_SERVICE = """[Unit]
+Description=Setup loop devices
+Before=network.target
+
+[Service]
+Type=oneshot
+ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
+ExecStop=/sbin/losetup -d /dev/loop3
+TimeoutSec=60
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
+"""
+
+DUPLICATE_COMPUTE_SERVICES = [
+    'OS::TripleO::Services::ComputeNeutronCorePlugin',
+    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
+    'OS::TripleO::Services::ComputeNeutronOvsAgent',
+    'OS::TripleO::Services::ComputeNeutronL3Agent'
+]
+
 
 def build_sdn_env_list(ds, sdn_map, env_list=None):
     """
@@ -90,7 +119,7 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
     if env_list is None:
         env_list = list()
     for k, v in sdn_map.items():
-        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
+        if ds['sdn_controller'] == k or (k in ds and ds[k]):
             if isinstance(v, dict):
                 # Append default SDN env file first
                 # The assumption is that feature-enabled SDN env files
@@ -100,12 +129,12 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
                 env_list.append(os.path.join(con.THT_ENV_DIR,
                                              v['default']))
                 env_list.extend(build_sdn_env_list(ds, v))
+            # check if the value is a (setting, env file) tuple, not a boolean
+            elif isinstance(v, tuple):
+                if ds[k] == v[0]:
+                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
             else:
                 env_list.append(os.path.join(con.THT_ENV_DIR, v))
-        # check if the value is not a boolean
-        elif isinstance(v, tuple):
-            if ds[k] == v[0]:
-                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
     if len(env_list) == 0:
         try:
             env_list.append(os.path.join(
@@ -116,6 +145,26 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
     return env_list
 
 
+def get_docker_sdn_files(ds_opts):
+    """
+    Returns docker THT env files for the detected SDN
+    :param ds_opts: deploy options
+    :return: list of docker THT env files for an SDN
+    """
+    docker_services = con.VALID_DOCKER_SERVICES
+    tht_dir = con.THT_DOCKER_ENV_DIR
+    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+    for i, sdn_file in enumerate(sdn_env_list):
+        sdn_base = os.path.basename(sdn_file)
+        if sdn_base in docker_services:
+            if docker_services[sdn_base] is not None:
+                sdn_env_list[i] = \
+                    os.path.join(tht_dir, docker_services[sdn_base])
+            else:
+                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
+    return sdn_env_list
+
+
 def create_deploy_cmd(ds, ns, inv, tmp_dir,
                       virtual, env_file='opnfv-environment.yaml',
                       net_data=False):
@@ -123,22 +172,52 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
 
     logging.info("Creating deployment command")
     deploy_options = ['network-environment.yaml']
 
+    ds_opts = ds['deploy_options']
+
+    if ds_opts['containers']:
+        deploy_options.append(os.path.join(con.THT_ENV_DIR,
+                                           'docker.yaml'))
+
+    if ds['global_params']['ha_enabled']:
+        if ds_opts['containers']:
+            deploy_options.append(os.path.join(con.THT_ENV_DIR,
+                                               'docker-ha.yaml'))
+        else:
+            deploy_options.append(os.path.join(con.THT_ENV_DIR,
+                                               'puppet-pacemaker.yaml'))
+
     if env_file:
         deploy_options.append(env_file)
-    ds_opts = ds['deploy_options']
-    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+
+    if ds_opts['containers']:
+        deploy_options.append('docker-images.yaml')
+        sdn_docker_files = get_docker_sdn_files(ds_opts)
+        for sdn_docker_file in sdn_docker_files:
+            deploy_options.append(sdn_docker_file)
+    else:
+        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
 
     for k, v in OTHER_FILE_MAP.items():
         if k in ds_opts and ds_opts[k]:
-            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
+            if ds_opts['containers']:
+                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
+                                                   "{}.yaml".format(k)))
+            else:
+                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
 
-    if ds_opts['ceph']:
-        prep_storage_env(ds, tmp_dir)
+    # TODO(trozet) Fix this check to detect whether ceph is in controller
+    # services rather than relying on the name of the env file
+    if ds_opts['ceph'] and 'csit' not in env_file:
+        prep_storage_env(ds, ns, virtual, tmp_dir)
         deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                            'storage-environment.yaml'))
-    if ds['global_params']['ha_enabled']:
-        deploy_options.append(os.path.join(con.THT_ENV_DIR,
-                                           'puppet-pacemaker.yaml'))
+    if ds_opts['sriov']:
+        prep_sriov_env(ds, tmp_dir)
+
+    # Check for 'k8s' here intentionally, as we may support other values
+    # such as openstack/openshift for the 'vim' option.
+    if ds_opts['vim'] == 'k8s':
+        deploy_options.append('kubernetes-environment.yaml')
 
     if virtual:
         deploy_options.append('virtual-environment.yaml')
@@ -146,11 +225,7 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
         deploy_options.append('baremetal-environment.yaml')
 
     num_control, num_compute = inv.get_node_counts()
-    if num_control == 0 or num_compute == 0:
-        logging.error("Detected 0 control or compute nodes. Control nodes: "
-                      "{}, compute nodes{}".format(num_control, num_compute))
-        raise ApexDeployException("Invalid number of control or computes")
-    elif num_control > 1 and not ds['global_params']['ha_enabled']:
+    if num_control > 1 and not ds['global_params']['ha_enabled']:
         num_control = 1
     if platform.machine() == 'aarch64':
         # aarch64 deploys were not completing in the default 90 mins.
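
The containers branch above swaps each SDN THT env file for its docker
variant. Below is a minimal, runnable sketch of that resolution, simplified
from build_sdn_env_list() and get_docker_sdn_files(); the THT paths and the
VALID_DOCKER_SERVICES map are illustrative stand-ins, not the real Apex
constants:

    # Simplified sketch of the SDN env file lookup; not the Apex code itself.
    import os

    THT_ENV_DIR = '/usr/share/openstack-tripleo-heat-templates/environments'
    THT_DOCKER_ENV_DIR = os.path.join(THT_ENV_DIR, 'services-docker')
    VALID_DOCKER_SERVICES = {'neutron-opendaylight.yaml': None,
                             'neutron-opendaylight-sriov.yaml': None}
    SDN_MAP = {'opendaylight': {'sriov': 'neutron-opendaylight-sriov.yaml',
                                'default': 'neutron-opendaylight.yaml'}}

    def resolve_sdn_env(ds_opts):
        """Pick THT env files for the configured SDN, mirroring the logic."""
        opts = SDN_MAP[ds_opts['sdn_controller']]
        # default env file first, then feature-enabled overrides
        env_list = [opts['default']]
        env_list += [f for k, f in opts.items()
                     if k != 'default' and ds_opts.get(k)]
        if ds_opts.get('containers'):
            # swap in the docker variant when one exists for the service
            return [os.path.join(THT_DOCKER_ENV_DIR, f)
                    if f in VALID_DOCKER_SERVICES
                    else os.path.join(THT_ENV_DIR, f) for f in env_list]
        return [os.path.join(THT_ENV_DIR, f) for f in env_list]

    print(resolve_sdn_env({'sdn_controller': 'opendaylight',
                           'sriov': 'eth1', 'containers': True}))
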
@@ -172,12 +247,16 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     if net_data:
         cmd += ' --networks-file network_data.yaml'
     libvirt_type = 'kvm'
-    if virtual:
+    if virtual and (platform.machine() != 'aarch64'):
         with open('/sys/module/kvm_intel/parameters/nested') as f:
             nested_kvm = f.read().strip()
             if nested_kvm != 'Y':
                 libvirt_type = 'qemu'
+    elif virtual and (platform.machine() == 'aarch64'):
+        libvirt_type = 'qemu'
     cmd += ' --libvirt-type {}'.format(libvirt_type)
+    if platform.machine() == 'aarch64':
+        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
     logging.info("Deploy command set: {}".format(cmd))
 
     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
@@ -185,13 +264,17 @@
     return cmd
 
 
-def prep_image(ds, img, tmp_dir, root_pw=None):
+def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
+               patches=None):
     """
     Locates sdn image and preps for deployment.
     :param ds: deploy settings
+    :param ns: network settings
     :param img: sdn image
     :param tmp_dir: dir to store modified sdn image
     :param root_pw: password to configure for overcloud image
+    :param docker_tag: Docker image tag for RDO version (default None)
+    :param patches: List of patches to apply to overcloud image
     :return: None
     """
     # TODO(trozet): Come up with a better way to organize this logic in this
@@ -204,6 +287,7 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
     ds_opts = ds['deploy_options']
     virt_cmds = list()
     sdn = ds_opts['sdn_controller']
+    patched_containers = set()
     # we need this due to rhbz #1436021
     # fixed in systemd-219-37.el7
     if sdn is not False:
@@ -218,7 +302,25 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
                 ".service"
         }])
 
+    if ns.get('http_proxy', ''):
+        virt_cmds.append({
+            con.VIRT_RUN_CMD:
+                "echo 'http_proxy={}' >> /etc/environment".format(
+                    ns['http_proxy'])})
+
+    if ns.get('https_proxy', ''):
+        virt_cmds.append({
+            con.VIRT_RUN_CMD:
+                "echo 'https_proxy={}' >> /etc/environment".format(
+                    ns['https_proxy'])})
+
+    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
+    shutil.copyfile(img, tmp_oc_image)
+    logging.debug("Temporary overcloud image stored as: {}".format(
+        tmp_oc_image))
+
     if ds_opts['vpn']:
+        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
         virt_cmds.append({
             con.VIRT_RUN_CMD:
@@ -258,15 +360,24 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
     if root_pw:
         pw_op = "password:{}".format(root_pw)
         virt_cmds.append({con.VIRT_PW: pw_op})
-    if ds_opts['sfc'] and dataplane == 'ovs':
-        virt_cmds.extend([
-            {con.VIRT_RUN_CMD: "yum -y install "
-                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
-                               "{}".format(OVS_NSH_KMOD_RPM)},
-            {con.VIRT_RUN_CMD: "yum downgrade -y "
-                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
-                               "{}".format(OVS_NSH_RPM)}
-        ])
+
+    if dataplane == 'ovs':
+        if ds_opts['sfc']:
+            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
+        elif sdn == 'opendaylight':
+            # FIXME(trozet) remove this after RDO is updated with fix for
+            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
+            ovs_file = os.path.basename(con.CUSTOM_OVS)
+            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
+            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
+                                            targets=[ovs_file])
+            virt_cmds.extend([
+                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
+                                                                  ovs_file))},
+                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
+                    ovs_file)}
+            ])
+
     if dataplane == 'fdio':
         # Patch neutron with using OVS external interface for router
         # and add generic linux NS interface driver
@@ -280,48 +391,71 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
                                "/root/nosdn_vpp_rpms/*.rpm"}
         ])
 
+    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+        'installer_vm']['ip']
     if sdn == 'opendaylight':
-        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
-            virt_cmds.extend([
-                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
-                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
-                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
-                                   "/root/puppet-opendaylight-"
-                                   "{}.tar.gz".format(ds_opts['odl_version'])}
-            ])
-            if ds_opts['odl_version'] == 'master':
-                virt_cmds.extend([
-                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
-                        ds_opts['odl_version'])}
-                ])
-            else:
-                virt_cmds.extend([
-                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
-                        ds_opts['odl_version'])}
-                ])
-
-    elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
-            and ds_opts['odl_vpp_netvirt']:
-        virt_cmds.extend([
-            {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
-            {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
-                ODL_NETVIRT_VPP_RPM)}
-        ])
-
-    if sdn == 'ovn':
+        oc_builder.inject_opendaylight(
+            odl_version=ds_opts['odl_version'],
+            image=tmp_oc_image,
+            tmp_dir=tmp_dir,
+            uc_ip=undercloud_admin_ip,
+            os_version=ds_opts['os_version'],
+            docker_tag=docker_tag,
+        )
+        if docker_tag:
+            patched_containers = patched_containers.union({'opendaylight'})
+
+    if patches:
+        if ds_opts['os_version'] == 'master':
+            branch = ds_opts['os_version']
+        else:
+            branch = "stable/{}".format(ds_opts['os_version'])
+        logging.info('Adding patches to overcloud')
+        patched_containers = patched_containers.union(
+            c_builder.add_upstream_patches(patches,
+                                           tmp_oc_image, tmp_dir,
+                                           branch,
+                                           uc_ip=undercloud_admin_ip,
+                                           docker_tag=docker_tag))
+    # If deploying containers with Ceph and no Ceph device is given, we need
+    # to use a persistent loop device for the Ceph OSDs
+    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
+        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
+        with open(tmp_losetup, 'w') as fh:
+            fh.write(LOSETUP_SERVICE)
         virt_cmds.extend([
-            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
-                               "*openvswitch*"},
-            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
-                               "*openvswitch*"}
+            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
+             },
+            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
+                .format(LOOP_DEVICE_SIZE)},
+            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
+            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
         ])
-
-    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
-    shutil.copyfile(img, tmp_oc_image)
-    logging.debug("Temporary overcloud image stored as: {}".format(
-        tmp_oc_image))
+    # TODO(trozet) remove this after LP#173474 is fixed
+    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
+    virt_cmds.append(
+        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
+                           "ConditionPathExists".format(dhcp_unit)})
+    # Prep for NFS
+    virt_cmds.extend([
+        {con.VIRT_INSTALL: "nfs-utils"},
+        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
+                           "/etc/systemd/system/multi-user.target.wants/"
+                           "nfs-server.service"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/glance"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/cinder"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/nova"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/glance *(rw,sync,"
+                           "no_root_squash,no_acl)' > /etc/exports"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/cinder *(rw,sync,"
+                           "no_root_squash,no_acl)' >> /etc/exports"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/nova *(rw,sync,"
"no_root_squash,no_acl)' >> /etc/exports"}, + {con.VIRT_RUN_CMD: "exportfs -avr"}, + ]) virt_utils.virt_customize(virt_cmds, tmp_oc_image) logging.info("Overcloud image customization complete") + return patched_containers def make_ssh_key(): @@ -373,6 +507,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir): # SSH keys private_key, public_key = make_ssh_key() + num_control, num_compute = inv.get_node_counts() + if num_control > 1 and not ds['global_params']['ha_enabled']: + num_control = 1 + # Make easier/faster variables to index in the file editor if 'performance' in ds_opts: perf = True @@ -400,6 +538,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir): else: perf = False + tenant_settings = ns['networks']['tenant'] + tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \ + ns['networks']['tenant'].get('segmentation_type') == 'vlan' + # Modify OPNFV environment # TODO: Change to build a dict and outputting yaml rather than parsing for line in fileinput.input(tmp_opnfv_env, inplace=True): @@ -423,6 +565,46 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir): ds_opts['dataplane'] == 'ovs_dpdk': output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \ './ovs-dpdk-preconfig.yaml' + elif 'NeutronNetworkVLANRanges' in line: + vlan_setting = '' + if tenant_vlan_enabled: + if ns['networks']['tenant']['overlay_id_range']: + vlan_setting = ns['networks']['tenant']['overlay_id_range'] + if 'datacentre' not in vlan_setting: + vlan_setting += ',datacentre:1:1000' + # SRIOV networks are VLAN based provider networks. In order to + # simplify the deployment, nfv_sriov will be the default physnet. + # VLANs are not needed in advance, and the user will have to create + # the network specifying the segmentation-id. + if ds_opts['sriov']: + if vlan_setting: + vlan_setting += ",nfv_sriov" + else: + vlan_setting = "datacentre:1:1000,nfv_sriov" + if vlan_setting: + output_line = " NeutronNetworkVLANRanges: " + vlan_setting + elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled: + if tenant_settings['overlay_id_range']: + physnets = tenant_settings['overlay_id_range'].split(',') + output_line = " NeutronBridgeMappings: " + for physnet in physnets: + physnet_name = physnet.split(':')[0] + if physnet_name != 'datacentre': + output_line += "{}:br-vlan,".format(physnet_name) + output_line += "datacentre:br-ex" + elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \ + and ds_opts['sdn_controller'] == 'opendaylight': + if tenant_settings['overlay_id_range']: + physnets = tenant_settings['overlay_id_range'].split(',') + output_line = " OpenDaylightProviderMappings: " + for physnet in physnets: + physnet_name = physnet.split(':')[0] + if physnet_name != 'datacentre': + output_line += "{}:br-vlan,".format(physnet_name) + output_line += "datacentre:br-ex" + elif 'NeutronNetworkType' in line and tenant_vlan_enabled: + output_line = " NeutronNetworkType: vlan\n" \ + " NeutronTunnelTypes: ''" if ds_opts['sdn_controller'] == 'opendaylight' and \ 'odl_vpp_routing_node' in ds_opts: @@ -432,16 +614,22 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir): ns['domain_name'])) elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio': if 'NeutronVPPAgentPhysnets' in line: - output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'". - format(tenant_nic['Controller'])) + # VPP interface tap0 will be used for external network + # connectivity. 
+                output_line = ("  NeutronVPPAgentPhysnets: "
+                               "'datacentre:{},external:tap0'"
+                               .format(tenant_nic['Controller']))
         elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                 'dvr') is True:
             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                 output_line = ''
             elif 'NeutronDhcpAgentsPerNetwork' in line:
-                num_control, num_compute = inv.get_node_counts()
+                if num_compute == 0:
+                    num_dhcp_agents = num_control
+                else:
+                    num_dhcp_agents = num_compute
                 output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
-                               .format(num_compute))
+                               .format(num_dhcp_agents))
             elif 'ComputeServices' in line:
                 output_line = ("  ComputeServices:\n"
                                "    - OS::TripleO::Services::NeutronDhcpAgent")
@@ -507,7 +695,46 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
 
         print(output_line)
 
+    # Merge compute services into control services if this is a single
+    # node deployment
+    if num_compute == 0:
+        logging.info("All in one deployment. Checking if service merging "
+                     "required into control services")
+        with open(tmp_opnfv_env, 'r') as fh:
+            data = yaml.safe_load(fh)
+        param_data = data['parameter_defaults']
+        # Check to see if any parameters are set for Compute
+        for param in param_data.keys():
+            if param != 'ComputeServices' and param.startswith('Compute'):
+                logging.warning("Compute parameter set, but will not be used "
+                                "in deployment: {}. Please use Controller "
+                                "based parameters when using All-in-one "
+                                "deployments".format(param))
+        if ('ControllerServices' in param_data and 'ComputeServices' in
+                param_data):
+            logging.info("Services detected in environment file. Merging...")
+            ctrl_services = param_data['ControllerServices']
+            cmp_services = param_data['ComputeServices']
+            param_data['ControllerServices'] = list(set().union(
+                ctrl_services, cmp_services))
+            for dup_service in DUPLICATE_COMPUTE_SERVICES:
+                if dup_service in param_data['ControllerServices']:
+                    param_data['ControllerServices'].remove(dup_service)
+            param_data.pop('ComputeServices')
+            logging.debug("Merged controller services: {}".format(
+                pprint.pformat(param_data['ControllerServices'])
+            ))
+            with open(tmp_opnfv_env, 'w') as fh:
+                yaml.safe_dump(data, fh, default_flow_style=False)
+        else:
+            logging.info("No services detected in env file, not merging "
+                         "services")
+
     logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
+    with open(tmp_opnfv_env, 'r') as fh:
+        logging.debug("opnfv-environment content is: {}".format(
+            pprint.pformat(yaml.safe_load(fh.read()))
+        ))
 
 
 def generate_ceph_key():
@@ -516,11 +743,13 @@ def generate_ceph_key():
     return base64.b64encode(header + key)
 
 
-def prep_storage_env(ds, tmp_dir):
+def prep_storage_env(ds, ns, virtual, tmp_dir):
     """
     Creates storage environment file for deployment. Source file is copied by
     undercloud playbook to host.
     :param ds:
+    :param ns: network settings
+    :param virtual: True if deployment is virtual
     :param tmp_dir:
     :return:
     """
@@ -542,9 +771,35 @@
         elif 'CephAdminKey' in line:
             print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                 'utf-8')))
+        elif 'CephClientKey' in line:
+            print("  CephClientKey: {}".format(generate_ceph_key().decode(
+                'utf-8')))
         else:
             print(line)
-    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
+
+    if ds_opts['containers']:
+        ceph_params = {}
+
+        # The maximum number of PGs allowed is calculated as num_mons * 200,
+        # so we set the PG count and pool count such that the total stays
+        # below that limit: num_pgs * num_pools * num_osds
+        ceph_params['CephPoolDefaultSize'] = 2
+        ceph_params['CephPoolDefaultPgNum'] = 32
+        if virtual:
+            ceph_params['CephAnsibleExtraConfig'] = {
+                'centos_package_dependencies': [],
+                'ceph_osd_docker_memory_limit': '1g',
+                'ceph_mds_docker_memory_limit': '1g',
+            }
+        ceph_device = ds_opts['ceph_device']
+        ceph_params['CephAnsibleDisksConfig'] = {
+            'devices': [ceph_device],
+            'journal_size': 512,
+            'osd_scenario': 'collocated'
+        }
+        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
+    # TODO(trozet): remove following block as we only support containers now
+    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
         with open(storage_file, 'a') as fh:
             fh.write('  ExtraConfig:\n')
             fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
@@ -552,12 +807,58 @@
                 ds_opts['ceph_device']
             ))
 
 
-def external_network_cmds(ns):
+def prep_sriov_env(ds, tmp_dir):
+    """
+    Creates SRIOV environment file for deployment. Source file is copied by
+    undercloud playbook to host.
+    :param ds: deploy settings
+    :param tmp_dir: dir where the SRIOV env file was copied
+    :return:
+    """
+    ds_opts = ds['deploy_options']
+    sriov_iface = ds_opts['sriov']
+    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
+    if not os.path.isfile(sriov_file):
+        logging.error("sriov-environment file is not in tmp directory: {}. "
+                      "Check if file was copied from "
+                      "undercloud".format(tmp_dir))
+        raise ApexDeployException("sriov-environment file not copied from "
+                                  "undercloud")
+    # TODO(rnoriega): Instead of line editing, refactor this code to load
+    # yaml file into a dict, edit it and write the file back.
+    for line in fileinput.input(sriov_file, inplace=True):
+        line = line.strip('\n')
+        if 'NovaSchedulerDefaultFilters' in line:
+            print("  {}".format(line[3:]))
+        elif 'NovaSchedulerAvailableFilters' in line:
+            print("  {}".format(line[3:]))
+        elif 'NeutronPhysicalDevMappings' in line:
+            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
+                  .format(sriov_iface))
+        elif 'NeutronSriovNumVFs' in line:
+            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
+        elif 'NovaPCIPassthrough' in line:
+            print("  NovaPCIPassthrough:")
+        elif 'devname' in line:
+            print("    - devname: \"{}\"".format(sriov_iface))
+        elif 'physical_network' in line:
+            print("      physical_network: \"nfv_sriov\"")
+        else:
+            print(line)
+
+
+def external_network_cmds(ns, ds):
     """
     Generates external network openstack commands
     :param ns: network settings
+    :param ds: deploy settings
     :return: list of commands to configure external network
     """
+    ds_opts = ds['deploy_options']
+    external_physnet = 'datacentre'
+    if ds_opts['dataplane'] == 'fdio' and \
+            ds_opts['sdn_controller'] != 'opendaylight':
+        external_physnet = 'external'
     if 'external' in ns.enabled_network_list:
         net_config = ns['networks']['external'][0]
         external = True
@@ -578,7 +879,8 @@
                       'compute']['vlan'])
     cmds.append("openstack network create external --project service "
                 "--external --provider-network-type {} "
-                "--provider-physical-network datacentre".format(ext_type))
+                "--provider-physical-network {}"
+                .format(ext_type, external_physnet))
     # create subnet command
     cidr = net_config['cidr']
     subnet_cmd = "openstack subnet create external-subnet --project " \
@@ -586,8 +888,7 @@
                  "service --network external --no-dhcp --gateway {} " \
                  "--allocation-pool start={},end={} --subnet-range " \
                  "{}".format(gateway, pool_start, pool_end, str(cidr))
     if external and cidr.version == 6:
-        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
-                      '--ipv6-address-mode slaac'
+        subnet_cmd += ' --ip-version 6'
     cmds.append(subnet_cmd)
     logging.debug("Neutron external network commands determined "
                   "as: {}".format(cmds))
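
The PG sizing in prep_storage_env() can be sanity-checked against the budget
its comment describes (num_mons * 200). A rough check; the pool and OSD
counts below are illustrative assumptions, not values Apex computes:

    # Rough sanity check of the Ceph PG sizing chosen in prep_storage_env().
    num_mons = 1
    max_pgs = num_mons * 200   # budget described in the comment above
    pg_num = 32                # CephPoolDefaultPgNum set in the env file
    num_pools = 5              # assumed pool count (images, vms, volumes, ...)
    num_osds = 1               # single loop-device OSD (/dev/loop3)

    total_pgs = pg_num * num_pools * num_osds
    print("{} PGs against a budget of {}".format(total_pgs, max_pgs))
    assert total_pgs < max_pgs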
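
For reference, external_network_cmds() assembles neutron provisioning
commands of roughly the shape below. The command templates come from the
code above; the network type, physnet and addresses are made-up example
values:

    # Illustrative output of external_network_cmds() for a flat external
    # network on the default 'datacentre' physnet; addresses are examples.
    ext_type = 'flat'
    external_physnet = 'datacentre'
    gateway, pool_start, pool_end = '172.30.9.1', '172.30.9.2', '172.30.9.100'
    cidr = '172.30.9.0/24'

    cmds = [
        "openstack network create external --project service "
        "--external --provider-network-type {} "
        "--provider-physical-network {}".format(ext_type, external_physnet),
        "openstack subnet create external-subnet --project service "
        "--network external --no-dhcp --gateway {} "
        "--allocation-pool start={},end={} --subnet-range {}".format(
            gateway, pool_start, pool_end, cidr)
    ]
    print('\n'.join(cmds))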