ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
".noarch.rpm"
+LOOP_DEVICE_SIZE = "10G"
+
LOSETUP_SERVICE = """[Unit]
Description=Setup loop devices
Before=network.target
if ds_opts['sriov']:
prep_sriov_env(ds, tmp_dir)
+ # Check for 'k8s' here intentionally, as we may support other values
+ # such as openstack/openshift for 'vim' option.
+ if ds_opts['vim'] == 'k8s':
+ deploy_options.append('kubernetes-environment.yaml')
+
if virtual:
deploy_options.append('virtual-environment.yaml')
else:
def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
- patches=None, upstream=False):
+ patches=None):
"""
Locates sdn image and preps for deployment.
:param ds: deploy settings
:param root_pw: password to configure for overcloud image
:param docker_tag: Docker image tag for RDO version (default None)
:param patches: List of patches to apply to overcloud image
- :param upstream: (boolean) Indicates if upstream deployment or not
:return: None
"""
# TODO(trozet): Come up with a better way to organize this logic in this function
logging.debug("Temporary overcloud image stored as: {}".format(
tmp_oc_image))
- # TODO (trozet): remove this if block after Fraser
- if sdn == 'opendaylight' and not upstream:
- if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
- {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
- "/root/puppet-opendaylight-"
- "{}.tar.gz".format(ds_opts['odl_version'])}
- ])
- if ds_opts['odl_version'] == 'master':
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
- ds_opts['odl_version'])}
- ])
- else:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ds_opts['odl_version'])}
- ])
-
- elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
- and ds_opts['odl_vpp_netvirt']:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ODL_NETVIRT_VPP_RPM)}
- ])
- elif sdn == 'opendaylight':
+ if sdn == 'opendaylight':
undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
'installer_vm']['ip']
oc_builder.inject_opendaylight(
docker_tag=docker_tag))
- # if containers with ceph, and no ceph device we need to use a
- # persistent loop device for Ceph OSDs
+ # if containers with ceph, and the ceph device is the default loop
+ # device, we need a persistent loop device for the Ceph OSDs
- if docker_tag and not ds_opts.get('ceph_device', None):
+ if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
with open(tmp_losetup, 'w') as fh:
fh.write(LOSETUP_SERVICE)
virt_cmds.extend([
{con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
},
- {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
+ {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
+ .format(LOOP_DEVICE_SIZE)},
{con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
{con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
])
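+ # Assumption: LOSETUP_SERVICE attaches /srv/data.img to the loop device
+ # at boot (via losetup), so the Ceph OSD backing file survives reboots.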
+ # TODO(trozet) remove this after LP#173474 is fixed
+ dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
+ virt_cmds.append(
+ {con.VIRT_RUN_CMD: "crudini --del {} Unit "
+ "ConditionPathExists".format(dhcp_unit)})
virt_utils.virt_customize(virt_cmds, tmp_oc_image)
logging.info("Overcloud image customization complete")
return patched_containers
else:
perf = False
+ tenant_settings = ns['networks']['tenant']
+ tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
+ tenant_settings.get('segmentation_type') == 'vlan'
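+ # overlay_id_range is assumed to hold the VLAN physnet ranges (e.g.
+ # "datacentre:1:1000") when the tenant segmentation_type is vlan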
+
# Modify OPNFV environment
# TODO: Change to build a dict and outputting yaml rather than parsing
for line in fileinput.input(tmp_opnfv_env, inplace=True):
ds_opts['dataplane'] == 'ovs_dpdk':
output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
'./ovs-dpdk-preconfig.yaml'
+ elif 'NeutronNetworkVLANRanges' in line:
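+ # builds a comma separated <physnet>[:<vlan_min>:<vlan_max>] list,
+ # e.g. "datacentre:1:1000,nfv_sriov"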
+ vlan_setting = ''
+ if tenant_vlan_enabled:
+ if tenant_settings['overlay_id_range']:
+ vlan_setting = tenant_settings['overlay_id_range']
+ if 'datacentre' not in vlan_setting:
+ vlan_setting += ',datacentre:1:1000'
+ # SRIOV networks are VLAN based provider networks. In order to
+ # simplify the deployment, nfv_sriov will be the default physnet.
+ # VLANs are not needed in advance, and the user will have to create
+ # the network specifying the segmentation-id.
+ if ds_opts['sriov']:
+ if vlan_setting:
+ vlan_setting += ",nfv_sriov"
+ else:
+ vlan_setting = "datacentre:1:1000,nfv_sriov"
+ if vlan_setting:
+ output_line = " NeutronNetworkVLANRanges: " + vlan_setting
+ elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
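+ # maps every non-datacentre physnet to br-vlan, e.g.
+ # " NeutronBridgeMappings: physnet1:br-vlan,datacentre:br-ex"
+ # (physnet1 is an illustrative name taken from overlay_id_range)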
+ if tenant_settings['overlay_id_range']:
+ physnets = tenant_settings['overlay_id_range'].split(',')
+ output_line = " NeutronBridgeMappings: "
+ for physnet in physnets:
+ physnet_name = physnet.split(':')[0]
+ if physnet_name != 'datacentre':
+ output_line += "{}:br-vlan,".format(physnet_name)
+ output_line += "datacentre:br-ex"
+ elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
+ and ds_opts['sdn_controller'] == 'opendaylight':
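+ # mirrors NeutronBridgeMappings above so OpenDaylight sees the same
+ # physnet-to-bridge mapping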
+ if tenant_settings['overlay_id_range']:
+ physnets = tenant_settings['overlay_id_range'].split(',')
+ output_line = " OpenDaylightProviderMappings: "
+ for physnet in physnets:
+ physnet_name = physnet.split(':')[0]
+ if physnet_name != 'datacentre':
+ output_line += "{}:br-vlan,".format(physnet_name)
+ output_line += "datacentre:br-ex"
+ elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
+ output_line = " NeutronNetworkType: vlan\n" \
+ " NeutronTunnelTypes: ''"
if ds_opts['sdn_controller'] == 'opendaylight' and \
'odl_vpp_routing_node' in ds_opts:
elif 'ComputeServices' in line:
output_line = (" ComputeServices:\n"
" - OS::TripleO::Services::NeutronDhcpAgent")
- # SRIOV networks are VLAN based provider networks. In order to simplify
- # the deployment, nfv_sriov will be the default physnet. VLANs are not
- # needed in advance, and the user will have to create the network
- # specifying the segmentation-id.
- if ds_opts['sriov']:
- if 'NeutronNetworkVLANRanges' in line:
- output_line = ("{},nfv_sriov'".format(line[:-1]))
if perf:
for role in 'NovaCompute', 'Controller':
print(line)
if ds_opts['containers']:
- undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
- 'installer_vm']['ip']
- ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
- docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
- "{}-centos-7".format(undercloud_admin_ip,
- ceph_version)
- ceph_params = {
- 'DockerCephDaemonImage': docker_image,
- }
- if not ds['global_params']['ha_enabled']:
- ceph_params['CephPoolDefaultSize'] = 1
+ ceph_params = {}
+ # max pgs allowed are calculated as num_osds * 200 (mon_max_pg_per_osd).
+ # Therefore we set pool size and pg num so that the total,
+ # num_pgs * num_pools * pool_size, stays below that limit.
+ ceph_params['CephPoolDefaultSize'] = 2
+ ceph_params['CephPoolDefaultPgNum'] = 32
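+ # e.g. assuming six pools: 6 pools * 32 pgs * 2 replicas = 384 PG copies,
+ # or 192 per OSD with two OSDs, which stays under the 200 per OSD cap
+ # (the pool count here is illustrative)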
if virtual:
ceph_params['CephAnsibleExtraConfig'] = {
'centos_package_dependencies': [],
'ceph_osd_docker_memory_limit': '1g',
'ceph_mds_docker_memory_limit': '1g',
}
- ceph_params['CephPoolDefaultPgNum'] = 32
- if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
- ceph_device = ds_opts['ceph_device']
- else:
- # TODO(trozet): make this DS default after Fraser
- ceph_device = '/dev/loop3'
-
+ ceph_device = ds_opts['ceph_device']
ceph_params['CephAnsibleDisksConfig'] = {
'devices': [ceph_device],
'journal_size': 512,
'osd_scenario': 'collocated'
}
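+ # 'collocated' above keeps the journal on the same device as the OSD
+ # data; journal_size is expressed in MB by ceph-ansible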
utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
+ # TODO(trozet): remove following block as we only support containers now
elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
with open(storage_file, 'a') as fh:
fh.write(' ExtraConfig:\n')