#!/usr/bin/env bash
#
# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#

source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
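
# ceph-helpers.sh provides the cluster and pool helpers used below
# (setup, run_mon, run_mgr, run_osd, create_pool, get_osds,
# wait_for_clean, teardown, among others)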

function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7101" # git grep '\<7101\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON --mon-osd-prime-pg-temp=false"

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    # check that erasure code plugins are preloaded
    CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
    grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
    for id in $(seq 0 10) ; do
        run_osd $dir $id || return 1
    done
    create_rbd_pool || return 1
    wait_for_clean || return 1
    # check that erasure code plugins are preloaded
    CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
    grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
    create_erasure_coded_pool ecpool || return 1
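
    # run either the tests named on the command line or, by default, every
    # TEST_ function defined in this file (harvested from the output of "set")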
    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        $func $dir || return 1
    done

    delete_pool ecpool || return 1
    teardown $dir || return 1
}
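
# create an erasure coded pool (named by $1) using 'myprofile'; the profile
# only overrides crush-failure-domain, so the default k=2 layout applies (as
# the chunk mapping comments near the end of this file assume)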
function create_erasure_coded_pool() {
    local poolname=$1

    ceph osd erasure-code-profile set myprofile \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure myprofile \
        || return 1
    wait_for_clean || return 1
}

function delete_pool() {
    local poolname=$1

    ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
}

function rados_put_get() {
    local dir=$1
    local poolname=$2
    local objname=${3:-SOMETHING}

    for marker in AAA BBB CCCC DDDD ; do
        printf "%*s" 1024 $marker
    done > $dir/ORIGINAL
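    # (each marker above is space-padded to 1024 bytes by printf, so ORIGINAL
    # is a 4 KiB object)
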
    # put and get an object, check the two copies are identical
    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1

    # take out an OSD used to store the object and
    # check the object can still be retrieved, which implies
    # it can be reconstructed from the remaining chunks
    local -a initial_osds=($(get_osds $poolname $objname))
    local last=$((${#initial_osds[@]} - 1))
    ceph osd out ${initial_osds[$last]} || return 1
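    # the leading "!" below inverts the grep: succeed only if the OSD that was
    # just marked out no longer appears in the object's mapping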
    ! get_osds $poolname $objname | grep '\<'${initial_osds[$last]}'\>' || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    ceph osd in ${initial_osds[$last]} || return 1
}

function rados_osds_out_in() {
    local dir=$1
    local poolname=$2
    local objname=${3:-SOMETHING}

    for marker in FFFF GGGG HHHH IIII ; do
        printf "%*s" 1024 $marker
    done > $dir/ORIGINAL

    # put and get an object, check the two copies are identical
    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1

    # take out two OSDs used to store the object, wait for the cluster
    # to be clean (i.e. all PGs are clean and active) again, which
    # implies the PGs have been moved to use the remaining OSDs. Check
    # the object can still be retrieved.
    wait_for_clean || return 1
    local osds_list=$(get_osds $poolname $objname)
    local -a osds=($osds_list)
    for osd in 0 1 ; do
        ceph osd out ${osds[$osd]} || return 1
    done
    wait_for_clean || return 1

    # verify the object is no longer mapped to the osds that are out
    for osd in 0 1 ; do
        ! get_osds $poolname $objname | grep '\<'${osds[$osd]}'\>' || return 1
    done
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1

    # bring the osds back in, wait for the cluster
    # to be clean (i.e. all PGs are clean and active) again, which
    # implies the PGs go back to using the same osds as before
    for osd in 0 1 ; do
        ceph osd in ${osds[$osd]} || return 1
    done
    wait_for_clean || return 1
    test "$osds_list" = "$(get_osds $poolname $objname)" || return 1
}

function TEST_rados_put_get_lrc_advanced() {
    local dir=$1
    local poolname=pool-lrc-a
    local profile=profile-lrc-a

    ceph osd erasure-code-profile set $profile \
        plugin=lrc \
        mapping=DD_ \
        crush-steps='[ [ "chooseleaf", "osd", 0 ] ]' \
        layers='[ [ "DDc", "" ] ]' || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_rados_put_get_lrc_kml() {
    local dir=$1
    local poolname=pool-lrc
    local profile=profile-lrc

    ceph osd erasure-code-profile set $profile \
        plugin=lrc \
        k=4 m=2 l=3 \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_rados_put_get_isa() {
    if ! erasure_code_plugin_exists isa ; then
        echo "SKIP because plugin isa has not been built"
        return 0
    fi
    local dir=$1
    local poolname=pool-isa

    ceph osd erasure-code-profile set profile-isa \
        plugin=isa \
        crush-failure-domain=osd || return 1
    create_pool $poolname 1 1 erasure profile-isa \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
}

function TEST_rados_put_get_jerasure() {
    local dir=$1

    rados_put_get $dir ecpool || return 1

    local poolname=pool-jerasure
    local profile=profile-jerasure

    ceph osd erasure-code-profile set $profile \
        plugin=jerasure \
        k=4 m=2 \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1
    rados_osds_out_in $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_rados_put_get_shec() {
    local dir=$1

    local poolname=pool-shec
    local profile=profile-shec

    ceph osd erasure-code-profile set $profile \
        plugin=shec \
        k=2 m=1 c=1 \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_alignment_constraints() {
    local payload=ABC
    echo "$payload" > $dir/ORIGINAL

    # Verify that the rados command enforces alignment constraints
    # imposed by the stripe width
    # See http://tracker.ceph.com/issues/8622
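    # the stripe width is stripe_unit * k, so the block size computed below is
    # deliberately one byte short of a full stripe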
    local stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
    eval local $(ceph osd erasure-code-profile get myprofile | grep k=)
    local block_size=$((stripe_unit * k - 1))
    dd if=/dev/zero of=$dir/ORIGINAL bs=$block_size count=2
    rados --block-size=$block_size \
        --pool ecpool put UNALIGNED $dir/ORIGINAL || return 1
}

function chunk_size() {
    echo $(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
}

# By default an object will be split in two (k=2) with the first part
# of the object in the first OSD of the up set and the second part in
# the next OSD in the up set. This layout is defined by the mapping
# parameter and this function helps verify that the first and second
# part of the object are located in the OSD where they should be.
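# For example, with the default mapping (DD_) the two parts are expected on
# up[0] and up[1], while the remapped profile further down (_DD) puts them on
# up[1] and up[2]; see TEST_chunk_mapping.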
function verify_chunk_mapping() {
    local dir=$1
    local poolname=$2
    local first=$3
    local second=$4

    local payload=$(printf '%*s' $(chunk_size) FIRST$poolname ; printf '%*s' $(chunk_size) SECOND$poolname)
    echo -n "$payload" > $dir/ORIGINAL

    rados --pool $poolname put SOMETHING$poolname $dir/ORIGINAL || return 1
    rados --pool $poolname get SOMETHING$poolname $dir/COPY || return 1
    local -a osds=($(get_osds $poolname SOMETHING$poolname))
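    # flush each OSD journal so the chunks written above are persisted to the
    # OSD data directories before the grep checks below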
    for (( i = 0; i < ${#osds[@]}; i++ )) ; do
        ceph daemon osd.${osds[$i]} flush_journal
    done
    diff $dir/ORIGINAL $dir/COPY || return 1

    local -a osds=($(get_osds $poolname SOMETHING$poolname))
    grep --quiet --recursive --text FIRST$poolname $dir/${osds[$first]} || return 1
    grep --quiet --recursive --text SECOND$poolname $dir/${osds[$second]} || return 1
}

function TEST_chunk_mapping() {
    local dir=$1

    # mapping=DD_ is the default:
    # first OSD (i.e. 0) in the up set has the first part of the object
    # second OSD (i.e. 1) in the up set has the second part of the object
    verify_chunk_mapping $dir ecpool 0 1 || return 1

    ceph osd erasure-code-profile set remap-profile \
        plugin=lrc \
        layers='[ [ "_DD", "" ] ]' \
        mapping='_DD' \
        crush-steps='[ [ "choose", "osd", 0 ] ]' || return 1
    ceph osd erasure-code-profile get remap-profile
    create_pool remap-pool 12 12 erasure remap-profile \
        || return 1

    # mapping=_DD :
    # second OSD (i.e. 1) in the up set has the first part of the object
    # third OSD (i.e. 2) in the up set has the second part of the object
    verify_chunk_mapping $dir remap-pool 1 2 || return 1

    delete_pool remap-pool
    ceph osd erasure-code-profile rm remap-profile
}

main test-erasure-code "$@"

# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/erasure-code/test-erasure-code.sh"
# End: