import logging
import os
import platform
+import pprint
import shutil
import uuid
import struct
import time
+import yaml
import apex.builders.overcloud_builder as oc_builder
import apex.builders.common_builder as c_builder
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
".noarch.rpm"
+LOOP_DEVICE_SIZE = "10G"
+
LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target
WantedBy=multi-user.target
"""
+DUPLICATE_COMPUTE_SERVICES = [
+ 'OS::TripleO::Services::ComputeNeutronCorePlugin',
+ 'OS::TripleO::Services::ComputeNeutronMetadataAgent',
+ 'OS::TripleO::Services::ComputeNeutronOvsAgent',
+ 'OS::TripleO::Services::ComputeNeutronL3Agent'
+]
+
def build_sdn_env_list(ds, sdn_map, env_list=None):
"""
return env_list
-def get_docker_sdn_file(ds_opts):
+def get_docker_sdn_files(ds_opts):
"""
-    Returns docker env file for detected SDN
+    Returns docker env files for detected SDN
:param ds_opts: deploy options
- :return: docker THT env file for an SDN
+ :return: list of docker THT env files for an SDN
"""
- # FIXME(trozet): We assume right now there is only one docker SDN file
docker_services = con.VALID_DOCKER_SERVICES
tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
- for sdn_file in sdn_env_list:
+ for i, sdn_file in enumerate(sdn_env_list):
sdn_base = os.path.basename(sdn_file)
if sdn_base in docker_services:
if docker_services[sdn_base] is not None:
- return os.path.join(tht_dir,
- docker_services[sdn_base])
+ sdn_env_list[i] = \
+ os.path.join(tht_dir, docker_services[sdn_base])
else:
- return os.path.join(tht_dir, sdn_base)
+ sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
+ return sdn_env_list
def create_deploy_cmd(ds, ns, inv, tmp_dir,
if ds_opts['containers']:
deploy_options.append('docker-images.yaml')
- sdn_docker_file = get_docker_sdn_file(ds_opts)
- if sdn_docker_file:
+ sdn_docker_files = get_docker_sdn_files(ds_opts)
+ for sdn_docker_file in sdn_docker_files:
deploy_options.append(sdn_docker_file)
+ if sdn_docker_files:
deploy_options.append('sdn-images.yaml')
else:
deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
deploy_options.append('baremetal-environment.yaml')
num_control, num_compute = inv.get_node_counts()
- if num_control == 0 or num_compute == 0:
- logging.error("Detected 0 control or compute nodes. Control nodes: "
- "{}, compute nodes{}".format(num_control, num_compute))
- raise ApexDeployException("Invalid number of control or computes")
- elif num_control > 1 and not ds['global_params']['ha_enabled']:
+ if num_control > 1 and not ds['global_params']['ha_enabled']:
num_control = 1
if platform.machine() == 'aarch64':
# aarch64 deploys were not completing in the default 90 mins.
"echo 'https_proxy={}' >> /etc/environment".format(
ns['https_proxy'])})
+ tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
+ shutil.copyfile(img, tmp_oc_image)
+ logging.debug("Temporary overcloud image stored as: {}".format(
+ tmp_oc_image))
+
if ds_opts['vpn']:
+ oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
virt_cmds.append({
con.VIRT_RUN_CMD:
if root_pw:
pw_op = "password:{}".format(root_pw)
virt_cmds.append({con.VIRT_PW: pw_op})
- if ds_opts['sfc'] and dataplane == 'ovs':
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y install "
- "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
- "{}".format(OVS_NSH_KMOD_RPM)},
- {con.VIRT_RUN_CMD: "yum downgrade -y "
- "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
- "{}".format(OVS_NSH_RPM)}
- ])
+
+ if dataplane == 'ovs':
+ if ds_opts['sfc']:
+ virt_cmds.extend([
+ {con.VIRT_RUN_CMD: "yum -y install "
+ "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
+ "{}".format(OVS_NSH_KMOD_RPM)},
+ {con.VIRT_RUN_CMD: "yum downgrade -y "
+ "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
+ "{}".format(OVS_NSH_RPM)}
+ ])
+ elif sdn == 'opendaylight':
+ # FIXME(trozet) remove this after RDO is updated with fix for
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
+ ovs_file = os.path.basename(con.CUSTOM_OVS)
+ ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
+ utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
+ targets=[ovs_file])
+ virt_cmds.extend([
+ {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
+ ovs_file))},
+ {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
+ ovs_file)}
+ ])
if dataplane == 'fdio':
# Patch neutron with using OVS external interface for router
# and add generic linux NS interface driver
"/root/nosdn_vpp_rpms/*.rpm"}
])
- tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
- shutil.copyfile(img, tmp_oc_image)
- logging.debug("Temporary overcloud image stored as: {}".format(
- tmp_oc_image))
-
+ undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+ 'installer_vm']['ip']
if sdn == 'opendaylight':
- undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
- 'installer_vm']['ip']
oc_builder.inject_opendaylight(
odl_version=ds_opts['odl_version'],
image=tmp_oc_image,
virt_cmds.extend([
{con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
},
- {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
+ {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
+ .format(LOOP_DEVICE_SIZE)},
{con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
{con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
])
+ # TODO(trozet) remove this after LP#173474 is fixed
+ dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
+ virt_cmds.append(
+ {con.VIRT_RUN_CMD: "crudini --del {} Unit "
+ "ConditionPathExists".format(dhcp_unit)})
virt_utils.virt_customize(virt_cmds, tmp_oc_image)
logging.info("Overcloud image customization complete")
return patched_containers
# SSH keys
private_key, public_key = make_ssh_key()
+ num_control, num_compute = inv.get_node_counts()
+ if num_control > 1 and not ds['global_params']['ha_enabled']:
+ num_control = 1
+
# Make easier/faster variables to index in the file editor
if 'performance' in ds_opts:
perf = True
else:
perf = False
+ tenant_settings = ns['networks']['tenant']
+ tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
+ ns['networks']['tenant'].get('segmentation_type') == 'vlan'
+
# Modify OPNFV environment
# TODO: Change to build a dict and outputting yaml rather than parsing
for line in fileinput.input(tmp_opnfv_env, inplace=True):
ds_opts['dataplane'] == 'ovs_dpdk':
output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
'./ovs-dpdk-preconfig.yaml'
+ elif 'NeutronNetworkVLANRanges' in line:
+ vlan_setting = ''
+ if tenant_vlan_enabled:
+ if ns['networks']['tenant']['overlay_id_range']:
+ vlan_setting = ns['networks']['tenant']['overlay_id_range']
+ if 'datacentre' not in vlan_setting:
+ vlan_setting += ',datacentre:1:1000'
+ # SRIOV networks are VLAN based provider networks. In order to
+ # simplify the deployment, nfv_sriov will be the default physnet.
+ # VLANs are not needed in advance, and the user will have to create
+ # the network specifying the segmentation-id.
+ if ds_opts['sriov']:
+ if vlan_setting:
+ vlan_setting += ",nfv_sriov"
+ else:
+ vlan_setting = "datacentre:1:1000,nfv_sriov"
+ if vlan_setting:
+ output_line = " NeutronNetworkVLANRanges: " + vlan_setting
+ elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
+ if tenant_settings['overlay_id_range']:
+ physnets = tenant_settings['overlay_id_range'].split(',')
+ output_line = " NeutronBridgeMappings: "
+ for physnet in physnets:
+ physnet_name = physnet.split(':')[0]
+ if physnet_name != 'datacentre':
+ output_line += "{}:br-vlan,".format(physnet_name)
+ output_line += "datacentre:br-ex"
+ elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
+ and ds_opts['sdn_controller'] == 'opendaylight':
+ if tenant_settings['overlay_id_range']:
+ physnets = tenant_settings['overlay_id_range'].split(',')
+ output_line = " OpenDaylightProviderMappings: "
+ for physnet in physnets:
+ physnet_name = physnet.split(':')[0]
+ if physnet_name != 'datacentre':
+ output_line += "{}:br-vlan,".format(physnet_name)
+ output_line += "datacentre:br-ex"
+ elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
+ output_line = " NeutronNetworkType: vlan\n" \
+ " NeutronTunnelTypes: ''"
if ds_opts['sdn_controller'] == 'opendaylight' and \
'odl_vpp_routing_node' in ds_opts:
if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
output_line = ''
elif 'NeutronDhcpAgentsPerNetwork' in line:
- num_control, num_compute = inv.get_node_counts()
+ if num_compute == 0:
+ num_dhcp_agents = num_control
+ else:
+ num_dhcp_agents = num_compute
output_line = (" NeutronDhcpAgentsPerNetwork: {}"
- .format(num_compute))
+ .format(num_dhcp_agents))
elif 'ComputeServices' in line:
output_line = (" ComputeServices:\n"
" - OS::TripleO::Services::NeutronDhcpAgent")
- # SRIOV networks are VLAN based provider networks. In order to simplify
- # the deployment, nfv_sriov will be the default physnet. VLANs are not
- # needed in advance, and the user will have to create the network
- # specifying the segmentation-id.
- if ds_opts['sriov']:
- if 'NeutronNetworkVLANRanges' in line:
- output_line = ("{},nfv_sriov'".format(line[:-1]))
if perf:
for role in 'NovaCompute', 'Controller':
print(output_line)
+ # Merge compute services into control services if only a single
+ # node deployment
+ if num_compute == 0:
+ logging.info("All in one deployment. Checking if service merging "
+ "required into control services")
+ with open(tmp_opnfv_env, 'r') as fh:
+ data = yaml.safe_load(fh)
+ param_data = data['parameter_defaults']
+ # Check to see if any parameters are set for Compute
+ for param in param_data.keys():
+ if param != 'ComputeServices' and param.startswith('Compute'):
+ logging.warning("Compute parameter set, but will not be used "
+ "in deployment: {}. Please use Controller "
+ "based parameters when using All-in-one "
+ "deployments".format(param))
+ if ('ControllerServices' in param_data and 'ComputeServices' in
+ param_data):
+ logging.info("Services detected in environment file. Merging...")
+ ctrl_services = param_data['ControllerServices']
+ cmp_services = param_data['ComputeServices']
+ param_data['ControllerServices'] = list(set().union(
+ ctrl_services, cmp_services))
+ for dup_service in DUPLICATE_COMPUTE_SERVICES:
+ if dup_service in param_data['ControllerServices']:
+ param_data['ControllerServices'].remove(dup_service)
+ param_data.pop('ComputeServices')
+ logging.debug("Merged controller services: {}".format(
+ pprint.pformat(param_data['ControllerServices'])
+ ))
+ with open(tmp_opnfv_env, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
+ else:
+ logging.info("No services detected in env file, not merging "
+ "services")
+
logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
+ with open(tmp_opnfv_env, 'r') as fh:
+ logging.debug("opnfv-environment content is : {}".format(
+ pprint.pformat(yaml.safe_load(fh.read()))
+ ))
def generate_ceph_key():
print(line)
if ds_opts['containers']:
- undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
- 'installer_vm']['ip']
- ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
- docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
- "{}-centos-7".format(undercloud_admin_ip,
- ceph_version)
- ceph_params = {
- 'DockerCephDaemonImage': docker_image,
- }
+ ceph_params = {}
# max pgs allowed are calculated as num_mons * 200. Therefore we
# set number of pgs and pools so that the total will be less:
"--allocation-pool start={},end={} --subnet-range " \
"{}".format(gateway, pool_start, pool_end, str(cidr))
if external and cidr.version == 6:
- subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
- '--ipv6-address-mode slaac'
+ subnet_cmd += ' --ip-version 6'
cmds.append(subnet_cmd)
logging.debug("Neutron external network commands determined "
"as: {}".format(cmds))