# Build a CRUSH map for 10 OSDs (2 OSDs per host, 2 hosts per rack, 2 racks per row) and install it.
crushtool -o crushmap --build --num_osds 10 host straw 2 rack straw 2 row straw 2 root straw 0
ceph osd setcrushmap -i crushmap

# Raise the markdown limits so the OSDs do not shut themselves down while the tests repeatedly mark them down.
ceph tell 'osd.*' injectargs --osd_max_markdown_count 1024 --osd_max_markdown_period 1

# Wait until no OSDs are reported down before running the checks.
while ceph health | grep down; do
    sleep 1
done
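# Two OSDs on the same host marked down should be reported as one host (two osds) down.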
test_mark_two_osds_same_host_down() {
    ceph osd down osd.0 osd.1
    ceph health | grep "1 host"
    ceph health | grep "2 osds"
    ceph health detail | grep "osd.0"
    ceph health detail | grep "osd.1"
}
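# osd.8 and osd.9 sit alone in the last branch of the generated map, so marking both down
# should be rolled up to a whole host, rack, and row in the health summary.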
test_mark_two_osds_same_rack_down() {
    ceph osd down osd.8 osd.9
    ceph health | grep "1 host"
    ceph health | grep "1 rack"
    ceph health | grep "1 row"
    ceph health | grep "2 osds"
    ceph health detail | grep "osd.8"
    ceph health detail | grep "osd.9"
}
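# Marking every OSD except the last one down should aggregate the fully-down subtrees
# (1 row, 2 racks, 4 hosts) while still counting all 9 down OSDs.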
test_mark_all_but_last_osds_down() {
    # `sed \$d` drops the last id from the listing, leaving osd.0 through osd.8.
    ceph osd down $(ceph osd ls | sed \$d)
    ceph health | grep "1 row"
    ceph health | grep "2 racks"
    ceph health | grep "4 hosts"
    ceph health | grep "9 osds"
}
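# Device classes add shadow CRUSH trees; two down OSDs of different classes on the same
# physical host should still be reported as one host (two osds) down.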
test_mark_two_osds_same_host_down_with_classes() {
    ceph osd crush set-device-class ssd osd.0 osd.2 osd.4 osd.6 osd.8
    ceph osd crush set-device-class hdd osd.1 osd.3 osd.5 osd.7 osd.9
    ceph osd down osd.0 osd.1
    ceph health | grep "1 host"
    ceph health | grep "2 osds"
    ceph health detail | grep "osd.0"
    ceph health detail | grep "osd.1"
}
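# Run the checks.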
test_mark_two_osds_same_host_down
test_mark_two_osds_same_rack_down
test_mark_all_but_last_osds_down
test_mark_two_osds_same_host_down_with_classes