diff --git a/apex/overcloud/overcloud_deploy.py b/apex/overcloud/overcloud_deploy.py
index 5ee2a33d..93732bf3 100644
--- a/apex/overcloud/overcloud_deploy.py
+++ b/apex/overcloud/overcloud_deploy.py
@@ -20,7 +20,7 @@ import time
 from apex.common import constants as con
 from apex.common.exceptions import ApexDeployException
 from apex.common import parsers
-from apex.virtual import virtual_utils as virt_utils
+from apex.virtual import utils as virt_utils
 from cryptography.hazmat.primitives import serialization as \
     crypto_serialization
 from cryptography.hazmat.primitives.asymmetric import rsa
@@ -30,11 +30,12 @@ from cryptography.hazmat.backends import default_backend as \
 
 SDN_FILE_MAP = {
     'opendaylight': {
-        'sfc': 'opendaylight_sfc.yaml',
+        'sfc': 'neutron-sfc-opendaylight.yaml',
         'vpn': 'neutron-bgpvpn-opendaylight.yaml',
         'gluon': 'gluon.yaml',
         'vpp': {
             'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
+            'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
             'default': 'neutron-opendaylight-honeycomb.yaml'
         },
         'default': 'neutron-opendaylight.yaml',
@@ -85,13 +86,41 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
     if len(env_list) == 0:
         try:
             env_list.append(os.path.join(
-                con.THT_ENV_DIR, sdn_map[ds['sdn_controller']]['default']))
+                con.THT_ENV_DIR, sdn_map['default']))
         except KeyError:
             logging.warning("Unable to find default file for SDN")
 
     return env_list
 
 
+def _get_node_counts(inventory):
+    """
+    Return numbers of controller and compute nodes in inventory
+
+    :param inventory: node inventory data structure
+    :return: number of controller and compute nodes in inventory
+    """
+    if not inventory:
+        raise ApexDeployException("Empty inventory")
+
+    nodes = inventory['nodes']
+    num_control = 0
+    num_compute = 0
+    for node in nodes:
+        if node['capabilities'] == 'profile:control':
+            num_control += 1
+        elif node['capabilities'] == 'profile:compute':
+            num_compute += 1
+        else:
+            # TODO(trozet) do we want to allow capabilities to not exist?
+            logging.error("Every node must include a 'capabilities' key "
+                          "tagged with either 'profile:control' or "
+                          "'profile:compute'")
+            raise ApexDeployException("Node missing capabilities "
+                                      "key: {}".format(node))
+    return num_control, num_compute
+
+
 def create_deploy_cmd(ds, ns, inv, tmp_dir,
                       virtual, env_file='opnfv-environment.yaml'):
 
@@ -100,7 +129,6 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     ds_opts = ds['deploy_options']
     deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
 
-    # TODO(trozet): make sure rt kvm file is in tht dir
     for k, v in OTHER_FILE_MAP.items():
         if k in ds_opts and ds_opts[k]:
             deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
@@ -118,21 +146,7 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     else:
         deploy_options.append('baremetal-environment.yaml')
 
-    nodes = inv['nodes']
-    num_control = 0
-    num_compute = 0
-    for node in nodes:
-        if node['capabilities'] == 'profile:control':
-            num_control += 1
-        elif node['capabilities'] == 'profile:compute':
-            num_compute += 1
-        else:
-            # TODO(trozet) do we want to allow capabilities to not exist?
-            logging.error("Every node must include a 'capabilities' key "
-                          "tagged with either 'profile:control' or "
-                          "'profile:compute'")
-            raise ApexDeployException("Node missing capabilities "
-                                      "key: {}".format(node))
+    num_control, num_compute = _get_node_counts(inv)
     if num_control == 0 or num_compute == 0:
         logging.error("Detected 0 control or compute nodes. Control nodes: "
                       "{}, compute nodes{}".format(num_control, num_compute))
@@ -200,7 +214,7 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
         os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
         os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
     }
-    for mod_file, mod in uio_types:
+    for mod_file, mod in uio_types.items():
         with open(mod_file, 'w') as fh:
             fh.write('#!/bin/bash\n')
             fh.write('exec /sbin/modprobe {}'.format(mod))
@@ -220,7 +234,7 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
             {con.VIRT_RUN_CMD: "yum -y install "
                                "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                                "{}".format(OVS_NSH_KMOD_RPM)},
-            {con.VIRT_RUN_CMD: "yum upgrade -y "
+            {con.VIRT_RUN_CMD: "yum downgrade -y "
                                "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
                                "{}".format(OVS_NSH_RPM)}
         ])
@@ -235,13 +249,22 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
         if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
             virt_cmds.extend([
                 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
-                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
-                    con.DEFAULT_ODL_VERSION)},
                 {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
                 {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
                                    "/root/puppet-opendaylight-"
                                    "{}.tar.gz".format(ds_opts['odl_version'])}
             ])
+            if ds_opts['odl_version'] == 'master':
+                virt_cmds.extend([
+                    {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
+                        ds_opts['odl_version'])}
+                ])
+            else:
+                virt_cmds.extend([
+                    {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
+                        ds_opts['odl_version'])}
+                ])
+
         elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
                 and ds_opts['odl_vpp_netvirt']:
             virt_cmds.extend([
@@ -285,15 +308,15 @@ def make_ssh_key():
         crypto_serialization.Encoding.OpenSSH,
         crypto_serialization.PublicFormat.OpenSSH
     )
-    pub_key = re.sub('ssh-rsa\s*', '', public_key.decode('utf-8'))
-    return private_key.decode('utf-8'), pub_key
+    return private_key.decode('utf-8'), public_key.decode('utf-8')
 
 
-def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
+def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
     """
     Creates modified opnfv/network environments for deployment
     :param ds: deploy settings
     :param ns: network settings
+    :param inv: node inventory
     :param opnfv_env: file path for opnfv-environment file
     :param net_env: file path for network-environment file
     :param tmp_dir: Apex tmp dir
@@ -339,37 +362,56 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
         perf = False
 
     # Modify OPNFV environment
+    # TODO: Change to build a dict and output yaml rather than parsing
     for line in fileinput.input(tmp_opnfv_env, inplace=True):
         line = line.strip('\n')
+        output_line = line
         if 'CloudDomain' in line:
-            print("  CloudDomain: {}".format(ns['domain_name']))
-        elif ds_opts['sdn_controller'] == 'opendaylight' and \
-                'odl_vpp_routing_node' in ds_opts and ds_opts[
-                'odl_vpp_routing_node'] != 'dvr':
+            output_line = "  CloudDomain: {}".format(ns['domain_name'])
+        elif 'replace_private_key' in line:
+            output_line = "    private_key: |\n"
+            key_out = ''
+            for line in private_key.splitlines():
+                key_out += "        {}\n".format(line)
+            output_line += key_out
+        elif 'replace_public_key' in line:
+            output_line = "    public_key: '{}'".format(public_key)
+
+        if ds_opts['sdn_controller'] == 'opendaylight' and \
+                'odl_vpp_routing_node' in ds_opts:
             if 'opendaylight::vpp_routing_node' in line:
-                print("  opendaylight::vpp_routing_node: ${}.${}".format(
-                    ds_opts['odl_vpp_routing_node'], ns['domain_name']))
+                output_line = ("  opendaylight::vpp_routing_node: {}.{}"
+                               .format(ds_opts['odl_vpp_routing_node'],
+                                       ns['domain_name']))
             elif 'ControllerExtraConfig' in line:
-                print("  ControllerExtraConfig:\n    "
-                      "tripleo::profile::base::neutron::agents::honeycomb"
-                      "::interface_role_mapping: ['{}:tenant-"
-                      "interface]'".format(tenant_ctrl_nic))
+                output_line = ("  ControllerExtraConfig:\n    "
+                               "tripleo::profile::base::neutron::agents::"
+                               "honeycomb::interface_role_mapping:"
+                               " ['{}:tenant-interface]'"
+                               .format(tenant_ctrl_nic))
             elif 'NovaComputeExtraConfig' in line:
-                print("  NovaComputeExtraConfig:\n    "
-                      "tripleo::profile::base::neutron::agents::honeycomb"
-                      "::interface_role_mapping: ['{}:tenant-"
-                      "interface]'".format(tenant_comp_nic))
-            else:
-                print(line)
-
+                output_line = ("  NovaComputeExtraConfig:\n    "
+                               "tripleo::profile::base::neutron::agents::"
+                               "honeycomb::interface_role_mapping:"
+                               " ['{}:tenant-interface]'"
+                               .format(tenant_comp_nic))
         elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
             if 'NeutronVPPAgentPhysnets' in line:
-                print("  NeutronVPPAgentPhysnets: 'datacentre:{}'".format(
-                    tenant_ctrl_nic))
-            else:
-                print(line)
-        elif perf:
-            line_printed = False
+                output_line = ("  NeutronVPPAgentPhysnets: 'datacentre:{}'".
+                               format(tenant_ctrl_nic))
+        elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
+                'dvr') is True:
+            if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
+                output_line = ''
+            elif 'NeutronDhcpAgentsPerNetwork' in line:
+                num_control, num_compute = _get_node_counts(inv)
+                output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
+                               .format(num_compute))
+            elif 'ComputeServices' in line:
+                output_line = ("  ComputeServices:\n"
+                               "    - OS::TripleO::Services::NeutronDhcpAgent")
+
+        if perf:
             for role in 'NovaCompute', 'Controller':
                 if role == 'NovaCompute':
                     perf_opts = perf_vpp_comp
@@ -377,65 +419,52 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
                     perf_opts = perf_vpp_ctrl
                 cfg = "{}ExtraConfig".format(role)
                 if cfg in line and perf_opts:
+                    perf_line = ''
                     if 'main-core' in perf_opts:
-                        print("  {}:\n"
-                              "    fdio::vpp_cpu_main_core: '{}'"
-                              "".format(cfg, perf_opts['main-core']))
-                        line_printed = True
-                        break
-                    elif 'corelist-workers' in perf_vpp_comp:
-                        print("  {}:\n"
-                              "    fdio::vpp_cpu_corelist_workers: '{}'"
-                              "".format(cfg, perf_opts['corelist-workers']))
-                        line_printed = True
-                        break
+                        perf_line += ("\n    fdio::vpp_cpu_main_core: '{}'"
+                                      .format(perf_opts['main-core']))
+                    if 'corelist-workers' in perf_opts:
+                        perf_line += ("\n    "
+                                      "fdio::vpp_cpu_corelist_workers: '{}'"
+                                      .format(perf_opts['corelist-workers']))
+                    if perf_line:
+                        output_line = ("  {}:{}".format(cfg, perf_line))
 
             # kernel args
             # (FIXME) use compute's kernel settings for all nodes for now.
             if 'ComputeKernelArgs' in line and perf_kern_comp:
                 kernel_args = ''
                 for k, v in perf_kern_comp.items():
-                    kernel_args += "{}={}".format(k, v)
+                    kernel_args += "{}={} ".format(k, v)
                 if kernel_args:
-                    print("ComputeKernelArgs: '{}'".format(kernel_args))
-                    line_printed = True
-            elif ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
+                    output_line = "  ComputeKernelArgs: '{}'".\
+                        format(kernel_args)
+            if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                 for k, v in OVS_PERF_MAP.items():
                     if k in line and v in perf_ovs_comp:
-                        print("  {}: {}".format(k, perf_ovs_comp[v]))
-                        line_printed = True
+                        output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])
 
-            if not line_printed:
-                print(line)
-        elif 'replace_private_key' in line:
-            print("    key: '{}'".format(private_key))
-        elif 'replace_public_key' in line:
-            print("    key: '{}'".format(public_key))
-        else:
-            print(line)
+        print(output_line)
 
     logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
 
     # Modify Network environment
     for line in fileinput.input(net_env, inplace=True):
         line = line.strip('\n')
-        if ds_opts['dataplane'] == 'ovs_dpdk':
-            if 'ComputeExtraConfigPre' in line:
-                print('  OS::TripleO::ComputeExtraConfigPre: '
-                      './ovs-dpdk-preconfig.yaml')
-            else:
-                print(line)
-        elif perf and perf_kern_comp:
-            if 'resource_registry' in line:
-                print("resource_registry:\n"
-                      "  OS::TripleO::NodeUserData: first-boot.yaml")
-            elif 'NovaSchedulerDefaultFilters' in line:
-                print("  NovaSchedulerDefaultFilters: 'RamFilter,"
-                      "ComputeFilter,AvailabilityZoneFilter,"
-                      "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
-                      "NUMATopologyFilter'")
-            else:
-                print(line)
+        if 'ComputeExtraConfigPre' in line and \
+                ds_opts['dataplane'] == 'ovs_dpdk':
+            print('  OS::TripleO::ComputeExtraConfigPre: '
+                  './ovs-dpdk-preconfig.yaml')
+        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
+                'resource_registry' in line:
+            print("resource_registry:\n"
+                  "  OS::TripleO::NodeUserData: first-boot.yaml")
+        elif perf and perf_kern_comp and \
+                'NovaSchedulerDefaultFilters' in line:
+            print("  NovaSchedulerDefaultFilters: 'RamFilter,"
+                  "ComputeFilter,AvailabilityZoneFilter,"
+                  "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
+                  "NUMATopologyFilter'")
         else:
             print(line)
 
@@ -552,7 +581,7 @@ def create_congress_cmds(overcloud_file):
         else:
             cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
         if driver == 'nova':
-            cmd += '--config api_version="2.34"'
+            cmd += ' --config api_version="2.34"'
         logging.debug("Congress command created: {}".format(cmd))
         cmds.append(cmd)
     return cmds
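
For reference, the central refactor in this change moves the duplicated node-counting loop out of create_deploy_cmd() into the shared _get_node_counts() helper, which both create_deploy_cmd() and the new DVR branch of prep_env() now call (the DVR branch sizes NeutronDhcpAgentsPerNetwork from the compute count, which is why prep_env() gains the inv parameter). The following is a minimal standalone sketch of the helper's behavior against a toy inventory; the inventory literal is illustrative, and RuntimeError stands in for ApexDeployException so the snippet runs on its own:

def get_node_counts(inventory):
    """Tally nodes tagged 'profile:control' vs 'profile:compute' (sketch)."""
    if not inventory:
        raise RuntimeError("Empty inventory")
    num_control = 0
    num_compute = 0
    for node in inventory['nodes']:
        if node['capabilities'] == 'profile:control':
            num_control += 1
        elif node['capabilities'] == 'profile:compute':
            num_compute += 1
        else:
            # Mirrors the helper in the diff: any other tag is a hard failure.
            raise RuntimeError("Node missing capabilities key: {}".format(node))
    return num_control, num_compute

# Toy inventory with one controller and two computes.
inventory = {'nodes': [
    {'capabilities': 'profile:control'},
    {'capabilities': 'profile:compute'},
    {'capabilities': 'profile:compute'},
]}
print(get_node_counts(inventory))  # -> (1, 2)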