X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=apex%2Fovercloud%2Fdeploy.py;h=41b2d56b42b6692f7f82b53f564aac8a4b9b17c7;hb=31e6f7f5529d04f12caabe804bfef305b5f263e8;hp=a45b3a9bfd3837b732243acdbf4305e1aa0d369e;hpb=3db4362870c073717d8d9ac90c31be583cf9b379;p=apex.git

diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index a45b3a9b..41b2d56b 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -12,10 +12,12 @@ import fileinput
 import logging
 import os
 import platform
+import pprint
 import shutil
 import uuid
 import struct
 import time
+import yaml
 
 import apex.builders.overcloud_builder as oc_builder
 import apex.builders.common_builder as c_builder
@@ -70,11 +72,11 @@ OVS_PERF_MAP = {
     'NeutronDpdkMemoryChannels': 'memory_channels'
 }
 
-OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
-OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                       ".noarch.rpm"
 
+LOOP_DEVICE_SIZE = "10G"
+
 LOSETUP_SERVICE = """[Unit]
 Description=Setup loop devices
 Before=network.target
@@ -90,6 +92,13 @@ RemainAfterExit=yes
 WantedBy=multi-user.target
 """
 
+DUPLICATE_COMPUTE_SERVICES = [
+    'OS::TripleO::Services::ComputeNeutronCorePlugin',
+    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
+    'OS::TripleO::Services::ComputeNeutronOvsAgent',
+    'OS::TripleO::Services::ComputeNeutronL3Agent'
+]
+
 
 def build_sdn_env_list(ds, sdn_map, env_list=None):
     """
@@ -136,24 +145,24 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
     return env_list
 
 
-def get_docker_sdn_file(ds_opts):
+def get_docker_sdn_files(ds_opts):
     """
     Returns docker env file for detected SDN
     :param ds_opts: deploy options
-    :return: docker THT env file for an SDN
+    :return: list of docker THT env files for an SDN
     """
-    # FIXME(trozet): We assume right now there is only one docker SDN file
     docker_services = con.VALID_DOCKER_SERVICES
-    tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
+    tht_dir = con.THT_DOCKER_ENV_DIR
     sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
-    for sdn_file in sdn_env_list:
+    for i, sdn_file in enumerate(sdn_env_list):
         sdn_base = os.path.basename(sdn_file)
         if sdn_base in docker_services:
             if docker_services[sdn_base] is not None:
-                return os.path.join(tht_dir,
-                                    docker_services[sdn_base])
+                sdn_env_list[i] = \
+                    os.path.join(tht_dir, docker_services[sdn_base])
             else:
-                return os.path.join(tht_dir, sdn_base)
+                sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
+    return sdn_env_list
 
 
 def create_deploy_cmd(ds, ns, inv, tmp_dir,
@@ -182,10 +191,9 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
 
     if ds_opts['containers']:
         deploy_options.append('docker-images.yaml')
-        sdn_docker_file = get_docker_sdn_file(ds_opts)
-        if sdn_docker_file:
+        sdn_docker_files = get_docker_sdn_files(ds_opts)
+        for sdn_docker_file in sdn_docker_files:
             deploy_options.append(sdn_docker_file)
-            deploy_options.append('sdn-images.yaml')
     else:
         deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
 
@@ -197,6 +205,8 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
         else:
             deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
 
+    # TODO(trozet) Fix this check to look for if ceph is in controller services
+    # and not use name of the file
     if ds_opts['ceph'] and 'csit' not in env_file:
         prep_storage_env(ds, ns, virtual, tmp_dir)
         deploy_options.append(os.path.join(con.THT_ENV_DIR,
@@ -204,17 +214,18 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     if ds_opts['sriov']:
         prep_sriov_env(ds, tmp_dir)
 
+    # Check for 'k8s' here intentionally, as we may support other values
+    # such as openstack/openshift for 'vim' option.
+    if ds_opts['vim'] == 'k8s':
+        deploy_options.append('kubernetes-environment.yaml')
+
     if virtual:
         deploy_options.append('virtual-environment.yaml')
     else:
         deploy_options.append('baremetal-environment.yaml')
 
     num_control, num_compute = inv.get_node_counts()
-    if num_control == 0 or num_compute == 0:
-        logging.error("Detected 0 control or compute nodes. Control nodes: "
-                      "{}, compute nodes{}".format(num_control, num_compute))
-        raise ApexDeployException("Invalid number of control or computes")
-    elif num_control > 1 and not ds['global_params']['ha_enabled']:
+    if num_control > 1 and not ds['global_params']['ha_enabled']:
         num_control = 1
     if platform.machine() == 'aarch64':
         # aarch64 deploys were not completing in the default 90 mins.
@@ -236,12 +247,16 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     if net_data:
         cmd += ' --networks-file network_data.yaml'
     libvirt_type = 'kvm'
-    if virtual:
+    if virtual and (platform.machine() != 'aarch64'):
         with open('/sys/module/kvm_intel/parameters/nested') as f:
             nested_kvm = f.read().strip()
             if nested_kvm != 'Y':
                 libvirt_type = 'qemu'
+    elif virtual and (platform.machine() == 'aarch64'):
+        libvirt_type = 'qemu'
     cmd += ' --libvirt-type {}'.format(libvirt_type)
+    if platform.machine() == 'aarch64':
+        cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
     logging.info("Deploy command set: {}".format(cmd))
 
     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
@@ -250,7 +265,7 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
 
 
 def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
-               patches=None, upstream=False):
+               patches=None):
     """
     Locates sdn image and preps for deployment.
     :param ds: deploy settings
@@ -260,7 +275,6 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
     :param root_pw: password to configure for overcloud image
     :param docker_tag: Docker image tag for RDO version (default None)
     :param patches: List of patches to apply to overcloud image
-    :param upstream: (boolean) Indicates if upstream deployment or not
     :return: None
     """
     # TODO(trozet): Come up with a better way to organize this logic in this
@@ -300,7 +314,13 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
                 "echo 'https_proxy={}' >> /etc/environment".format(
                     ns['https_proxy'])})
 
+    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
+    shutil.copyfile(img, tmp_oc_image)
+    logging.debug("Temporary overcloud image stored as: {}".format(
+        tmp_oc_image))
+
     if ds_opts['vpn']:
+        oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
         virt_cmds.append({
             con.VIRT_RUN_CMD:
@@ -340,15 +360,24 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
     if root_pw:
         pw_op = "password:{}".format(root_pw)
         virt_cmds.append({con.VIRT_PW: pw_op})
-    if ds_opts['sfc'] and dataplane == 'ovs':
-        virt_cmds.extend([
-            {con.VIRT_RUN_CMD: "yum -y install "
-                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
-                               "{}".format(OVS_NSH_KMOD_RPM)},
-            {con.VIRT_RUN_CMD: "yum downgrade -y "
-                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
-                               "{}".format(OVS_NSH_RPM)}
-        ])
+
+    if dataplane == 'ovs':
+        if ds_opts['sfc']:
+            oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
+        elif sdn == 'opendaylight':
+            # FIXME(trozet) remove this after RDO is updated with fix for
+            # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
+            ovs_file = os.path.basename(con.CUSTOM_OVS)
+            ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
+            utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
+                                            targets=[ovs_file])
+            virt_cmds.extend([
+                {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
+                                                                  ovs_file))},
+                {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
+                    ovs_file)}
+            ])
+
     if dataplane == 'fdio':
         # Patch neutron with using OVS external interface for router
         # and add generic linux NS interface driver
@@ -362,42 +391,9 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
                 "/root/nosdn_vpp_rpms/*.rpm"}
         ])
 
-    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
-    shutil.copyfile(img, tmp_oc_image)
-    logging.debug("Temporary overcloud image stored as: {}".format(
-        tmp_oc_image))
-
-    # TODO (trozet): remove this if block after Fraser
-    if sdn == 'opendaylight' and not upstream:
-        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
-            virt_cmds.extend([
-                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
-                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
-                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
-                                   "/root/puppet-opendaylight-"
-                                   "{}.tar.gz".format(ds_opts['odl_version'])}
-            ])
-            if ds_opts['odl_version'] == 'master':
-                virt_cmds.extend([
-                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
-                        ds_opts['odl_version'])}
-                ])
-            else:
-                virt_cmds.extend([
-                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
-                        ds_opts['odl_version'])}
-                ])
-
-    elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
-            and ds_opts['odl_vpp_netvirt']:
-        virt_cmds.extend([
-            {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
-            {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
-                ODL_NETVIRT_VPP_RPM)}
-        ])
-    elif sdn == 'opendaylight':
-        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
-            'installer_vm']['ip']
+    undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+        'installer_vm']['ip']
+    if sdn == 'opendaylight':
         oc_builder.inject_opendaylight(
             odl_version=ds_opts['odl_version'],
             image=tmp_oc_image,
@@ -430,10 +426,33 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
         virt_cmds.extend([
             {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
              },
-            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
+            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
+                .format(LOOP_DEVICE_SIZE)},
            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
        ])
+    # TODO(trozet) remove this after LP#173474 is fixed
+    dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
+    virt_cmds.append(
+        {con.VIRT_RUN_CMD: "crudini --del {} Unit "
+                           "ConditionPathExists".format(dhcp_unit)})
+    # Prep for NFS
+    virt_cmds.extend([
+        {con.VIRT_INSTALL: "nfs-utils"},
+        {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
+                           "/etc/systemd/system/multi-user.target.wants/"
+                           "nfs-server.service"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/glance"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/cinder"},
+        {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/nova"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/glance *(rw,sync,"
+                           "no_root_squash,no_acl)' > /etc/exports"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/cinder *(rw,sync,"
+                           "no_root_squash,no_acl)' >> /etc/exports"},
+        {con.VIRT_RUN_CMD: "echo '/root/nfs/nova *(rw,sync,"
+                           "no_root_squash,no_acl)' >> /etc/exports"},
+        {con.VIRT_RUN_CMD: "exportfs -avr"},
+    ])
     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
     logging.info("Overcloud image customization complete")
     return patched_containers
@@ -488,6 +507,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
     # SSH keys
     private_key, public_key = make_ssh_key()
 
+    num_control, num_compute = inv.get_node_counts()
+    if num_control > 1 and not ds['global_params']['ha_enabled']:
+        num_control = 1
+
     # Make easier/faster variables to index in the file editor
     if 'performance' in ds_opts:
         perf = True
@@ -515,6 +538,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
     else:
         perf = False
 
+    tenant_settings = ns['networks']['tenant']
+    tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
+        ns['networks']['tenant'].get('segmentation_type') == 'vlan'
+
     # Modify OPNFV environment
     # TODO: Change to build a dict and outputting yaml rather than parsing
     for line in fileinput.input(tmp_opnfv_env, inplace=True):
@@ -538,6 +565,46 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
                 ds_opts['dataplane'] == 'ovs_dpdk':
             output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
                           './ovs-dpdk-preconfig.yaml'
+        elif 'NeutronNetworkVLANRanges' in line:
+            vlan_setting = ''
+            if tenant_vlan_enabled:
+                if ns['networks']['tenant']['overlay_id_range']:
+                    vlan_setting = ns['networks']['tenant']['overlay_id_range']
+                    if 'datacentre' not in vlan_setting:
+                        vlan_setting += ',datacentre:1:1000'
+            # SRIOV networks are VLAN based provider networks. In order to
+            # simplify the deployment, nfv_sriov will be the default physnet.
+            # VLANs are not needed in advance, and the user will have to create
+            # the network specifying the segmentation-id.
+            if ds_opts['sriov']:
+                if vlan_setting:
+                    vlan_setting += ",nfv_sriov"
+                else:
+                    vlan_setting = "datacentre:1:1000,nfv_sriov"
+            if vlan_setting:
+                output_line = "  NeutronNetworkVLANRanges: " + vlan_setting
+        elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
+            if tenant_settings['overlay_id_range']:
+                physnets = tenant_settings['overlay_id_range'].split(',')
+                output_line = "  NeutronBridgeMappings: "
+                for physnet in physnets:
+                    physnet_name = physnet.split(':')[0]
+                    if physnet_name != 'datacentre':
+                        output_line += "{}:br-vlan,".format(physnet_name)
+                output_line += "datacentre:br-ex"
+        elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
+                and ds_opts['sdn_controller'] == 'opendaylight':
+            if tenant_settings['overlay_id_range']:
+                physnets = tenant_settings['overlay_id_range'].split(',')
+                output_line = "  OpenDaylightProviderMappings: "
+                for physnet in physnets:
+                    physnet_name = physnet.split(':')[0]
+                    if physnet_name != 'datacentre':
+                        output_line += "{}:br-vlan,".format(physnet_name)
+                output_line += "datacentre:br-ex"
+        elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
+            output_line = "  NeutronNetworkType: vlan\n" \
+                          "  NeutronTunnelTypes: ''"
 
         if ds_opts['sdn_controller'] == 'opendaylight' and \
                 'odl_vpp_routing_node' in ds_opts:
@@ -557,19 +624,15 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                 output_line = ''
             elif 'NeutronDhcpAgentsPerNetwork' in line:
-                num_control, num_compute = inv.get_node_counts()
+                if num_compute == 0:
+                    num_dhcp_agents = num_control
+                else:
+                    num_dhcp_agents = num_compute
                 output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
-                               .format(num_compute))
+                               .format(num_dhcp_agents))
             elif 'ComputeServices' in line:
                 output_line = ("  ComputeServices:\n"
                                "  - OS::TripleO::Services::NeutronDhcpAgent")
-        # SRIOV networks are VLAN based provider networks. In order to simplify
-        # the deployment, nfv_sriov will be the default physnet. VLANs are not
-        # needed in advance, and the user will have to create the network
-        # specifying the segmentation-id.
- if ds_opts['sriov']: - if 'NeutronNetworkVLANRanges' in line: - output_line = ("{},nfv_sriov'".format(line[:-1])) if perf: for role in 'NovaCompute', 'Controller': @@ -632,7 +695,46 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir): print(output_line) + # Merge compute services into control services if only a single + # node deployment + if num_compute == 0: + logging.info("All in one deployment. Checking if service merging " + "required into control services") + with open(tmp_opnfv_env, 'r') as fh: + data = yaml.safe_load(fh) + param_data = data['parameter_defaults'] + # Check to see if any parameters are set for Compute + for param in param_data.keys(): + if param != 'ComputeServices' and param.startswith('Compute'): + logging.warning("Compute parameter set, but will not be used " + "in deployment: {}. Please use Controller " + "based parameters when using All-in-one " + "deployments".format(param)) + if ('ControllerServices' in param_data and 'ComputeServices' in + param_data): + logging.info("Services detected in environment file. Merging...") + ctrl_services = param_data['ControllerServices'] + cmp_services = param_data['ComputeServices'] + param_data['ControllerServices'] = list(set().union( + ctrl_services, cmp_services)) + for dup_service in DUPLICATE_COMPUTE_SERVICES: + if dup_service in param_data['ControllerServices']: + param_data['ControllerServices'].remove(dup_service) + param_data.pop('ComputeServices') + logging.debug("Merged controller services: {}".format( + pprint.pformat(param_data['ControllerServices']) + )) + with open(tmp_opnfv_env, 'w') as fh: + yaml.safe_dump(data, fh, default_flow_style=False) + else: + logging.info("No services detected in env file, not merging " + "services") + logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env)) + with open(tmp_opnfv_env, 'r') as fh: + logging.debug("opnfv-environment content is : {}".format( + pprint.pformat(yaml.safe_load(fh.read())) + )) def generate_ceph_key(): @@ -676,15 +778,7 @@ def prep_storage_env(ds, ns, virtual, tmp_dir): print(line) if ds_opts['containers']: - undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][ - 'installer_vm']['ip'] - ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']] - docker_image = "{}:8787/ceph/daemon:tag-build-master-" \ - "{}-centos-7".format(undercloud_admin_ip, - ceph_version) - ceph_params = { - 'DockerCephDaemonImage': docker_image, - } + ceph_params = {} # max pgs allowed are calculated as num_mons * 200. Therefore we # set number of pgs and pools so that the total will be less: @@ -794,8 +888,7 @@ def external_network_cmds(ns, ds): "--allocation-pool start={},end={} --subnet-range " \ "{}".format(gateway, pool_start, pool_end, str(cidr)) if external and cidr.version == 6: - subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \ - '--ipv6-address-mode slaac' + subnet_cmd += ' --ip-version 6' cmds.append(subnet_cmd) logging.debug("Neutron external network commands determined " "as: {}".format(cmds))
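
Editor's note on the all-in-one service merge this patch adds to prep_env(): when the inventory reports zero compute nodes, the ComputeServices list in the opnfv-environment file is folded into ControllerServices via a set union, the ComputeNeutron* agents listed in DUPLICATE_COMPUTE_SERVICES are stripped (the controller role already runs its own variants), and the ComputeServices key is removed entirely. The sketch below replays that merge on a toy parameter_defaults dict. It is a standalone illustration, not part of the patch: merge_aio_services and the sample service lists are hypothetical, and only DUPLICATE_COMPUTE_SERVICES is copied verbatim from the diff above. sorted() appears here purely to make the printed output deterministic; the patch itself keeps the unordered result of the union.

import pprint

# Copied verbatim from the patch above
DUPLICATE_COMPUTE_SERVICES = [
    'OS::TripleO::Services::ComputeNeutronCorePlugin',
    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
    'OS::TripleO::Services::ComputeNeutronOvsAgent',
    'OS::TripleO::Services::ComputeNeutronL3Agent'
]


def merge_aio_services(param_data):
    """Hypothetical helper mirroring the merge performed in prep_env()."""
    if 'ControllerServices' in param_data and 'ComputeServices' in param_data:
        # Union the two role service lists, then drop compute-side
        # duplicates of services the controller provides itself
        merged = set(param_data['ControllerServices']).union(
            param_data['ComputeServices'])
        merged -= set(DUPLICATE_COMPUTE_SERVICES)
        # sorted() only makes this sketch deterministic; the patch keeps
        # the unordered union
        param_data['ControllerServices'] = sorted(merged)
        # All-in-one: the compute role deploys no services of its own
        param_data.pop('ComputeServices')
    return param_data


# Toy parameter_defaults resembling a trimmed opnfv-environment file
params = {
    'ControllerServices': ['OS::TripleO::Services::NeutronDhcpAgent'],
    'ComputeServices': ['OS::TripleO::Services::NovaCompute',
                        'OS::TripleO::Services::ComputeNeutronOvsAgent'],
}
pprint.pprint(merge_aio_services(params))
# ComputeNeutronOvsAgent is discarded; NovaCompute lands under the
# controller role alongside NeutronDhcpAgent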