X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=apex%2Fovercloud%2Fdeploy.py;h=dc7d84f0e5890d14a8e7cd5339e1772590d88e4b;hb=7c17ef653f9c867cdcb969c40f5a1eb4251bad74;hp=19d46e15afda76d20f6e8c1e8118b29c59400ffa;hpb=2719f32c6d8bd4d69ebdd5215d43959d224ffc76;p=apex.git

diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 19d46e15..dc7d84f0 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -16,10 +16,13 @@ import shutil
 import uuid
 import struct
 import time
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.common_builder as c_builder
 
 from apex.common import constants as con
 from apex.common.exceptions import ApexDeployException
 from apex.common import parsers
+from apex.common import utils
 from apex.virtual import utils as virt_utils
 from cryptography.hazmat.primitives import serialization as \
     crypto_serialization
@@ -39,6 +42,7 @@ SDN_FILE_MAP = {
             'default': 'neutron-opendaylight-honeycomb.yaml'
         },
         'l2gw': 'neutron-l2gw-opendaylight.yaml',
+        'sriov': 'neutron-opendaylight-sriov.yaml',
         'default': 'neutron-opendaylight.yaml',
     },
     'onos': {
@@ -71,6 +75,23 @@ OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                       ".noarch.rpm"
 
+LOOP_DEVICE_SIZE = "10G"
+
+LOSETUP_SERVICE = """[Unit]
+Description=Setup loop devices
+Before=network.target
+
+[Service]
+Type=oneshot
+ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
+ExecStop=/sbin/losetup -d /dev/loop3
+TimeoutSec=60
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
+"""
+
 
 def build_sdn_env_list(ds, sdn_map, env_list=None):
     """
@@ -91,7 +112,7 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
     if env_list is None:
         env_list = list()
     for k, v in sdn_map.items():
-        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
+        if ds['sdn_controller'] == k or (k in ds and ds[k]):
             if isinstance(v, dict):
                 # Append default SDN env file first
                 # The assumption is that feature-enabled SDN env files
@@ -101,12 +122,12 @@
                 env_list.append(os.path.join(con.THT_ENV_DIR,
                                              v['default']))
                 env_list.extend(build_sdn_env_list(ds, v))
+            # check if the value is not a boolean
+            elif isinstance(v, tuple):
+                if ds[k] == v[0]:
+                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
             else:
                 env_list.append(os.path.join(con.THT_ENV_DIR, v))
-        # check if the value is not a boolean
-        elif isinstance(v, tuple):
-            if ds[k] == v[0]:
-                env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
     if len(env_list) == 0:
         try:
             env_list.append(os.path.join(
@@ -117,6 +138,26 @@
     return env_list
 
 
+def get_docker_sdn_file(ds_opts):
+    """
+    Returns docker env file for detected SDN
+    :param ds_opts: deploy options
+    :return: docker THT env file for an SDN
+    """
+    # FIXME(trozet): We assume right now there is only one docker SDN file
+    docker_services = con.VALID_DOCKER_SERVICES
+    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
+    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+    for sdn_file in sdn_env_list:
+        sdn_base = os.path.basename(sdn_file)
+        if sdn_base in docker_services:
+            if docker_services[sdn_base] is not None:
+                return os.path.join(tht_dir,
+                                    docker_services[sdn_base])
+            else:
+                return os.path.join(tht_dir, sdn_base)
+
+
 def create_deploy_cmd(ds, ns, inv, tmp_dir,
                       virtual, env_file='opnfv-environment.yaml',
                       net_data=False):
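
Note: get_docker_sdn_file() keys the docker THT environment file off the basename of whatever build_sdn_env_list() resolves. A minimal standalone sketch of that lookup, with invented stand-ins for the con.VALID_DOCKER_SERVICES and con.THT_DOCKER_ENV_DIR tables (assumed shapes, not the project's actual values):

    import os

    # Assumed stand-ins: a None value means the docker env file keeps the
    # same basename; a string remaps it to a different file name.
    VALID_DOCKER_SERVICES = {'neutron-opendaylight.yaml': None,
                             'neutron-opendaylight-sriov.yaml': None,
                             'neutron-ml2-ovn.yaml': 'neutron-ovn-ha.yaml'}
    THT_DOCKER_ENV_DIR = {'queens': 'environments/services-docker'}

    def docker_sdn_file(sdn_env_list, os_version='queens'):
        tht_dir = THT_DOCKER_ENV_DIR[os_version]
        for sdn_file in sdn_env_list:
            sdn_base = os.path.basename(sdn_file)
            if sdn_base in VALID_DOCKER_SERVICES:
                return os.path.join(
                    tht_dir, VALID_DOCKER_SERVICES[sdn_base] or sdn_base)

    # -> environments/services-docker/neutron-opendaylight.yaml
    print(docker_sdn_file(['environments/neutron-opendaylight.yaml']))
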
logging.info("Creating deployment command") deploy_options = ['network-environment.yaml'] + ds_opts = ds['deploy_options'] + + if ds_opts['containers']: + deploy_options.append(os.path.join(con.THT_ENV_DIR, + 'docker.yaml')) + + if ds['global_params']['ha_enabled']: + if ds_opts['containers']: + deploy_options.append(os.path.join(con.THT_ENV_DIR, + 'docker-ha.yaml')) + else: + deploy_options.append(os.path.join(con.THT_ENV_DIR, + 'puppet-pacemaker.yaml')) + if env_file: deploy_options.append(env_file) - ds_opts = ds['deploy_options'] - deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP) + + if ds_opts['containers']: + deploy_options.append('docker-images.yaml') + sdn_docker_file = get_docker_sdn_file(ds_opts) + if sdn_docker_file: + deploy_options.append(sdn_docker_file) + deploy_options.append('sdn-images.yaml') + else: + deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP) for k, v in OTHER_FILE_MAP.items(): if k in ds_opts and ds_opts[k]: - deploy_options.append(os.path.join(con.THT_ENV_DIR, v)) + if ds_opts['containers']: + deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR, + "{}.yaml".format(k))) + else: + deploy_options.append(os.path.join(con.THT_ENV_DIR, v)) - if ds_opts['ceph']: - prep_storage_env(ds, tmp_dir) + if ds_opts['ceph'] and 'csit' not in env_file: + prep_storage_env(ds, ns, virtual, tmp_dir) deploy_options.append(os.path.join(con.THT_ENV_DIR, 'storage-environment.yaml')) - if ds['global_params']['ha_enabled']: - deploy_options.append(os.path.join(con.THT_ENV_DIR, - 'puppet-pacemaker.yaml')) + if ds_opts['sriov']: + prep_sriov_env(ds, tmp_dir) + + # Check for 'k8s' here intentionally, as we may support other values + # such as openstack/openshift for 'vim' option. + if ds_opts['vim'] == 'k8s': + deploy_options.append('kubernetes-environment.yaml') if virtual: deploy_options.append('virtual-environment.yaml') @@ -186,13 +256,17 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir, return cmd -def prep_image(ds, img, tmp_dir, root_pw=None): +def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None, + patches=None): """ Locates sdn image and preps for deployment. 
@@ -186,13 +256,17 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     return cmd
 
 
-def prep_image(ds, img, tmp_dir, root_pw=None):
+def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
+               patches=None):
     """
     Locates sdn image and preps for deployment.
     :param ds: deploy settings
+    :param ns: network settings
     :param img: sdn image
     :param tmp_dir: dir to store modified sdn image
     :param root_pw: password to configure for overcloud image
+    :param docker_tag: Docker image tag for RDO version (default None)
+    :param patches: List of patches to apply to overcloud image
     :return: None
     """
     # TODO(trozet): Come up with a better way to organize this logic in this
@@ -205,6 +279,7 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
     ds_opts = ds['deploy_options']
     virt_cmds = list()
     sdn = ds_opts['sdn_controller']
+    patched_containers = set()
     # we need this due to rhbz #1436021
     # fixed in systemd-219-37.el7
     if sdn is not False:
@@ -219,6 +294,18 @@
             ".service"
         }])
 
+    if ns.get('http_proxy', ''):
+        virt_cmds.append({
+            con.VIRT_RUN_CMD:
+                "echo 'http_proxy={}' >> /etc/environment".format(
+                    ns['http_proxy'])})
+
+    if ns.get('https_proxy', ''):
+        virt_cmds.append({
+            con.VIRT_RUN_CMD:
+                "echo 'https_proxy={}' >> /etc/environment".format(
+                    ns['https_proxy'])})
+
     if ds_opts['vpn']:
         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
         virt_cmds.append({
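
Note: the proxy handling above appends a customization command only when the corresponding key exists and is non-empty in the network settings. A self-contained sketch of the same guard pattern (VIRT_RUN_CMD stands in for the constant in apex.common.constants; the values are made up):

    VIRT_RUN_CMD = '--run-command'
    ns = {'http_proxy': 'http://10.0.0.1:3128', 'https_proxy': ''}
    virt_cmds = []
    for key in ('http_proxy', 'https_proxy'):
        if ns.get(key, ''):
            virt_cmds.append({VIRT_RUN_CMD:
                              "echo '{}={}' >> /etc/environment".format(
                                  key, ns[key])})
    print(virt_cmds)  # only http_proxy survives; the empty value is skipped
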
@@ -281,48 +368,59 @@
                  "/root/nosdn_vpp_rpms/*.rpm"}
         ])
 
-    if sdn == 'opendaylight':
-        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
-            virt_cmds.extend([
-                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
-                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
-                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
-                                   "/root/puppet-opendaylight-"
-                                   "{}.tar.gz".format(ds_opts['odl_version'])}
-            ])
-            if ds_opts['odl_version'] == 'master':
-                virt_cmds.extend([
-                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
-                        ds_opts['odl_version'])}
-                ])
-            else:
-                virt_cmds.extend([
-                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
-                        ds_opts['odl_version'])}
-                ])
-
-    elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
-            and ds_opts['odl_vpp_netvirt']:
-        virt_cmds.extend([
-            {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
-            {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
-                ODL_NETVIRT_VPP_RPM)}
-        ])
-
-    if sdn == 'ovn':
-        virt_cmds.extend([
-            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
-                               "*openvswitch*"},
-            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
-                               "*openvswitch*"}
-        ])
-
     tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
     shutil.copyfile(img, tmp_oc_image)
     logging.debug("Temporary overcloud image stored as: {}".format(
         tmp_oc_image))
+
+    # resolved up front because both the ODL injection and the patching
+    # below need it (avoids a NameError when patches are given without ODL)
+    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+        'installer_vm']['ip']
+
+    if sdn == 'opendaylight':
+        oc_builder.inject_opendaylight(
+            odl_version=ds_opts['odl_version'],
+            image=tmp_oc_image,
+            tmp_dir=tmp_dir,
+            uc_ip=undercloud_admin_ip,
+            os_version=ds_opts['os_version'],
+            docker_tag=docker_tag,
+        )
+        if docker_tag:
+            patched_containers = patched_containers.union({'opendaylight'})
+
+    if patches:
+        if ds_opts['os_version'] == 'master':
+            branch = ds_opts['os_version']
+        else:
+            branch = "stable/{}".format(ds_opts['os_version'])
+        logging.info('Adding patches to overcloud')
+        patched_containers = patched_containers.union(
+            c_builder.add_upstream_patches(patches,
+                                           tmp_oc_image, tmp_dir,
+                                           branch,
+                                           uc_ip=undercloud_admin_ip,
+                                           docker_tag=docker_tag))
+
+    # if containers with ceph, and no ceph device we need to use a
+    # persistent loop device for Ceph OSDs
+    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
+        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
+        with open(tmp_losetup, 'w') as fh:
+            fh.write(LOSETUP_SERVICE)
+        virt_cmds.extend([
+            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(
+                tmp_losetup)},
+            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
+                .format(LOOP_DEVICE_SIZE)},
+            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
+            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
+        ])
+    # TODO(trozet) remove this after LP#173474 is fixed
+    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
+    virt_cmds.append(
+        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
+                           "ConditionPathExists".format(dhcp_unit)})
     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
     logging.info("Overcloud image customization complete")
+    return patched_containers
 
 
 def make_ssh_key():
@@ -401,6 +499,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
     else:
         perf = False
 
+    tenant_settings = ns['networks']['tenant']
+    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
+        ns['networks']['tenant'].get('segmentation_type') == 'vlan'
+
     # Modify OPNFV environment
     # TODO: Change to build a dict and outputting yaml rather than parsing
     for line in fileinput.input(tmp_opnfv_env, inplace=True):
@@ -424,6 +526,46 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
                 ds_opts['dataplane'] == 'ovs_dpdk':
             output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                           './ovs-dpdk-preconfig.yaml'
+        elif 'NeutronNetworkVLANRanges' in line:
+            vlan_setting = ''
+            if tenant_vlan_enabled:
+                if ns['networks']['tenant']['overlay_id_range']:
+                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
+                    if 'datacentre' not in vlan_setting:
+                        vlan_setting += ',datacentre:1:1000'
+            # SRIOV networks are VLAN based provider networks. In order to
+            # simplify the deployment, nfv_sriov will be the default physnet.
+            # VLANs are not needed in advance, and the user will have to create
+            # the network specifying the segmentation-id.
+            if ds_opts['sriov']:
+                if vlan_setting:
+                    vlan_setting += ",nfv_sriov"
+                else:
+                    vlan_setting = "datacentre:1:1000,nfv_sriov"
+            if vlan_setting:
+                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
+        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
+            if tenant_settings['overlay_id_range']:
+                physnets = tenant_settings['overlay_id_range'].split(',')
+                output_line = "  NeutronBridgeMappings: "
+                for physnet in physnets:
+                    physnet_name = physnet.split(':')[0]
+                    if physnet_name != 'datacentre':
+                        output_line += "{}:br-vlan,".format(physnet_name)
+                output_line += "datacentre:br-ex"
+        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
+                and ds_opts['sdn_controller'] == 'opendaylight':
+            if tenant_settings['overlay_id_range']:
+                physnets = tenant_settings['overlay_id_range'].split(',')
+                output_line = "  OpenDaylightProviderMappings: "
+                for physnet in physnets:
+                    physnet_name = physnet.split(':')[0]
+                    if physnet_name != 'datacentre':
+                        output_line += "{}:br-vlan,".format(physnet_name)
+                output_line += "datacentre:br-ex"
+        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
+            output_line = "  NeutronNetworkType: vlan\n" \
+                          "  NeutronTunnelTypes: ''"
 
         if ds_opts['sdn_controller'] == 'opendaylight' and \
                 'odl_vpp_routing_node' in ds_opts:
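
Note: to make the VLAN range assembly above concrete, assume a tenant overlay_id_range of 'physnet1:500:525' with SRIOV enabled (both values invented for the example):

    overlay_id_range = 'physnet1:500:525'  # assumed tenant VLAN range
    sriov = True                           # ds_opts['sriov'] holds an iface name

    vlan_setting = overlay_id_range
    if 'datacentre' not in vlan_setting:
        vlan_setting += ',datacentre:1:1000'
    if sriov:
        vlan_setting += ',nfv_sriov'
    # -> NeutronNetworkVLANRanges: physnet1:500:525,datacentre:1:1000,nfv_sriov
    print('  NeutronNetworkVLANRanges: ' + vlan_setting)
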
@@ -433,8 +575,11 @@
                                        ns['domain_name']))
         elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
             if 'NeutronVPPAgentPhysnets' in line:
-                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
-                               format(tenant_nic['Controller']))
+                # VPP interface tap0 will be used for external network
+                # connectivity.
+                output_line = ("  NeutronVPPAgentPhysnets: "
+                               "'datacentre:{},external:tap0'"
+                               .format(tenant_nic['Controller']))
         elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
                 'dvr') is True:
             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
@@ -517,11 +662,13 @@ def generate_ceph_key():
     return base64.b64encode(header + key)
 
 
-def prep_storage_env(ds, tmp_dir):
+def prep_storage_env(ds, ns, virtual, tmp_dir):
     """
     Creates storage environment file for deployment. Source file is copied
     by undercloud playbook to host.
     :param ds:
+    :param ns: network settings
+    :param virtual: True if deployment is virtual
     :param tmp_dir:
     :return:
     """
@@ -548,7 +695,30 @@
                         'utf-8')))
         else:
             print(line)
-    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
+
+    if ds_opts['containers']:
+        ceph_params = {}
+
+        # max pgs allowed are calculated as num_mons * 200. Therefore we
+        # set the number of pgs and pools so that the total
+        # (num_pgs * num_pools * num_osds) stays below that limit.
+        ceph_params['CephPoolDefaultSize'] = 2
+        ceph_params['CephPoolDefaultPgNum'] = 32
+        if virtual:
+            ceph_params['CephAnsibleExtraConfig'] = {
+                'centos_package_dependencies': [],
+                'ceph_osd_docker_memory_limit': '1g',
+                'ceph_mds_docker_memory_limit': '1g',
+            }
+        ceph_device = ds_opts['ceph_device']
+        ceph_params['CephAnsibleDisksConfig'] = {
+            'devices': [ceph_device],
+            'journal_size': 512,
+            'osd_scenario': 'collocated'
+        }
+        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
+    # TODO(trozet): remove following block as we only support containers now
+    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
         with open(storage_file, 'a') as fh:
             fh.write('  ExtraConfig:\n')
             fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
                 ds_opts['ceph_device']
             ))
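
Note: utils.edit_tht_env() merges the Ceph parameters into the parameter_defaults section of the environment file. A minimal stand-in showing the intended effect (assumes PyYAML; the real helper lives in apex.common.utils and may differ):

    import yaml

    def edit_tht_env(env_file, section, settings):
        # load the THT env file, merge settings into one section, write back
        with open(env_file) as fh:
            data = yaml.safe_load(fh) or {}
        data.setdefault(section, {}).update(settings)
        with open(env_file, 'w') as fh:
            yaml.safe_dump(data, fh, default_flow_style=False)

    with open('storage-environment.yaml', 'w') as fh:  # toy input file
        fh.write('parameter_defaults:\n  CinderEnableIscsiBackend: false\n')
    edit_tht_env('storage-environment.yaml', 'parameter_defaults',
                 {'CephPoolDefaultSize': 2, 'CephPoolDefaultPgNum': 32})
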
@@ -556,12 +726,58 @@
 
 
-def external_network_cmds(ns):
+def prep_sriov_env(ds, tmp_dir):
+    """
+    Creates SRIOV environment file for deployment. Source file is copied by
+    undercloud playbook to host.
+    :param ds: deploy settings
+    :param tmp_dir: dir where the sriov environment file is stored
+    :return: None
+    """
+    ds_opts = ds['deploy_options']
+    sriov_iface = ds_opts['sriov']
+    sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
+    if not os.path.isfile(sriov_file):
+        logging.error("sriov-environment file is not in tmp directory: {}. "
+                      "Check if file was copied from "
+                      "undercloud".format(tmp_dir))
+        raise ApexDeployException("sriov-environment file not copied from "
+                                  "undercloud")
+    # TODO(rnoriega): Instead of line editing, refactor this code to load
+    # the yaml file into a dict, edit it and write the file back.
+    for line in fileinput.input(sriov_file, inplace=True):
+        line = line.strip('\n')
+        if 'NovaSchedulerDefaultFilters' in line:
+            print("  {}".format(line[3:]))
+        elif 'NovaSchedulerAvailableFilters' in line:
+            print("  {}".format(line[3:]))
+        elif 'NeutronPhysicalDevMappings' in line:
+            print("  NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
+                  .format(sriov_iface))
+        elif 'NeutronSriovNumVFs' in line:
+            print("  NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
+        elif 'NovaPCIPassthrough' in line:
+            print("  NovaPCIPassthrough:")
+        elif 'devname' in line:
+            print("    - devname: \"{}\"".format(sriov_iface))
+        elif 'physical_network' in line:
+            print("      physical_network: \"nfv_sriov\"")
+        else:
+            print(line)
+
+
+def external_network_cmds(ns, ds):
     """
     Generates external network openstack commands
     :param ns: network settings
+    :param ds: deploy settings
     :return: list of commands to configure external network
     """
+    ds_opts = ds['deploy_options']
+    external_physnet = 'datacentre'
+    if ds_opts['dataplane'] == 'fdio' and \
+            ds_opts['sdn_controller'] != 'opendaylight':
+        external_physnet = 'external'
     if 'external' in ns.enabled_network_list:
         net_config = ns['networks']['external'][0]
         external = True
@@ -582,7 +798,8 @@
                 'compute']['vlan'])
     cmds.append("openstack network create external --project service "
                 "--external --provider-network-type {} "
-                "--provider-physical-network datacentre".format(ext_type))
+                "--provider-physical-network {}"
+                .format(ext_type, external_physnet))
     # create subnet command
     cidr = net_config['cidr']
     subnet_cmd = "openstack subnet create external-subnet --project " \
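
Note: the external_network_cmds() change is easiest to see in the rendered commands; with a flat external network, the physnet now flips from 'datacentre' to 'external' for fdio deployments without OpenDaylight (values below are illustrative):

    ext_type = 'flat'  # 'vlan' when the external network defines a VLAN ID
    for dataplane, sdn in (('ovs', 'opendaylight'), ('fdio', False)):
        external_physnet = 'datacentre'
        if dataplane == 'fdio' and sdn != 'opendaylight':
            external_physnet = 'external'
        print("openstack network create external --project service "
              "--external --provider-network-type {} "
              "--provider-physical-network {}".format(
                  ext_type, external_physnet))
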