Baremetal deployments were failing because the Ceph PG count was
exceeding the maximum allowed. Virtual deployments still worked because
we lower the number of pools and PGs per OSD there. This patch changes
the values to ones that should work for both virtual and baremetal. It
also includes a fix that adds the controllers back as OSDs, along with
a few other cleanups.
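
For reference, a minimal sketch of the PG budget behind the new values
(illustrative only: the pool count, OSD count, and the
mon_max_pg_per_osd default are assumptions, not values taken from this
patch):

    # Ceph (Luminous) complains once the projected PG replicas per OSD
    # exceed mon_max_pg_per_osd (200 by default).
    MON_MAX_PG_PER_OSD = 200  # Ceph default; assumed for this sketch

    def pgs_per_osd(num_pools, pg_num, pool_size, num_osds):
        """Projected PG replicas landing on each OSD."""
        return num_pools * pg_num * pool_size / num_osds

    # With CephPoolDefaultPgNum=32 and CephPoolDefaultSize=2 (set below),
    # a hypothetical deploy creating 6 pools across 3 OSDs (e.g. three
    # controllers acting as OSDs) stays under the cap:
    # 6 * 32 * 2 / 3 = 128 <= 200
    assert pgs_per_osd(6, 32, 2, 3) <= MON_MAX_PG_PER_OSD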
JIRA: APEX-614
JIRA: APEX-569
Change-Id: I2ad65727ecdcaa0454eb53d25e32b7f1a53cd3a4
Signed-off-by: Tim Rozet <trozet@redhat.com>
docker_tag=docker_tag))
- # if containers with ceph, and no ceph device we need to use a
- # persistent loop device for Ceph OSDs
+ # if containers with ceph and the ceph device is the default loop
+ # device, we need to set up a persistent loop device for Ceph OSDs
- if docker_tag and not ds_opts.get('ceph_device', None):
+ if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
with open(tmp_losetup, 'w') as fh:
fh.write(LOSETUP_SERVICE)
ceph_params = {
'DockerCephDaemonImage': docker_image,
}
- if not ds['global_params']['ha_enabled']:
- ceph_params['CephPoolDefaultSize'] = 1
+ # Max PGs allowed are calculated as num_osds * 200 (Ceph's
+ # mon_max_pg_per_osd default). Therefore we set the number of PGs
+ # and pools so that num_pgs * num_pools * pool_size stays below
+ # that limit.
+ ceph_params['CephPoolDefaultSize'] = 2
+ ceph_params['CephPoolDefaultPgNum'] = 32
if virtual:
ceph_params['CephAnsibleExtraConfig'] = {
'centos_package_dependencies': [],
'ceph_osd_docker_memory_limit': '1g',
'ceph_mds_docker_memory_limit': '1g',
}
- ceph_params['CephPoolDefaultPgNum'] = 32
- if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
- ceph_device = ds_opts['ceph_device']
- else:
- # TODO(trozet): make this DS default after Fraser
- ceph_device = '/dev/loop3'
-
+ ceph_device = ds_opts['ceph_device']
ceph_params['CephAnsibleDisksConfig'] = {
'devices': [ceph_device],
'journal_size': 512,
'osd_scenario': 'collocated'
}
utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
+ # TODO(trozet): remove following block as we only support containers now
elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
with open(storage_file, 'a') as fh:
fh.write(' ExtraConfig:\n')
'os_version',
'l2gw',
'sriov',
+ 'containers',
+ 'ceph_device']
OPT_DEPLOY_SETTINGS = ['performance',
'vsperf',
'yardstick',
'dovetail',
'odl_vpp_routing_node',
self['deploy_options'][req_set] = 'ovs'
elif req_set == 'ceph':
self['deploy_options'][req_set] = True
+ elif req_set == 'ceph_device':
+ self['deploy_options'][req_set] = '/dev/loop3'
elif req_set == 'odl_version':
self['deploy_options'][req_set] = \
constants.DEFAULT_ODL_VERSION
'DockerCephDaemonImage':
'192.0.2.1:8787/ceph/daemon:tag-build-master-luminous-centos'
'-7',
- 'CephPoolDefaultSize': 1,
+ 'CephPoolDefaultSize': 2,
'CephAnsibleExtraConfig': {
'centos_package_dependencies': [],
'ceph_osd_docker_memory_limit': '1g',
owner: root
group: root
become: yes
+ - name: Insert Ceph OSDs into Controller role
+ lineinfile:
+ path: /usr/share/openstack-tripleo-heat-templates/roles_data.yaml
+ insertbefore: '^\s*-\sOS::TripleO::Services::CephRbdMirror$'
+ line: ' - OS::TripleO::Services::CephOSD'
+ owner: root
+ group: root
+ become: yes
- name: Upload glance images
shell: "{{ stackrc }} && openstack overcloud image upload"
become: yes