X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=src%2Fceph%2Fqa%2Fworkunits%2Frados%2Ftest_health_warnings.sh;fp=src%2Fceph%2Fqa%2Fworkunits%2Frados%2Ftest_health_warnings.sh;h=0000000000000000000000000000000000000000;hb=7da45d65be36d36b880cc55c5036e96c24b53f00;hp=a4a9c11c638cf4833ab5cc71f42063f70c2c0311;hpb=691462d09d0987b47e112d6ee8740375df3c51b2;p=stor4nfv.git

diff --git a/src/ceph/qa/workunits/rados/test_health_warnings.sh b/src/ceph/qa/workunits/rados/test_health_warnings.sh
deleted file mode 100755
index a4a9c11..0000000
--- a/src/ceph/qa/workunits/rados/test_health_warnings.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/bash -ex
-
-set -u
-
-# number of osds = 10
-crushtool -o crushmap --build --num_osds 10 host straw 2 rack straw 2 row straw 2 root straw 0
-ceph osd setcrushmap -i crushmap
-ceph osd tree
-ceph tell osd.* injectargs --osd_max_markdown_count 1024 --osd_max_markdown_period 1
-
-wait_for_healthy() {
-  while ceph health | grep down
-  do
-    sleep 1
-  done
-}
-
-test_mark_two_osds_same_host_down() {
-  ceph osd set noup
-  ceph osd down osd.0 osd.1
-  ceph health detail
-  ceph health | grep "1 host"
-  ceph health | grep "2 osds"
-  ceph health detail | grep "osd.0"
-  ceph health detail | grep "osd.1"
-  ceph osd unset noup
-  wait_for_healthy
-}
-
-test_mark_two_osds_same_rack_down() {
-  ceph osd set noup
-  ceph osd down osd.8 osd.9
-  ceph health detail
-  ceph health | grep "1 host"
-  ceph health | grep "1 rack"
-  ceph health | grep "1 row"
-  ceph health | grep "2 osds"
-  ceph health detail | grep "osd.8"
-  ceph health detail | grep "osd.9"
-  ceph osd unset noup
-  wait_for_healthy
-}
-
-test_mark_all_but_last_osds_down() {
-  ceph osd set noup
-  ceph osd down $(ceph osd ls | sed \$d)
-  ceph health detail
-  ceph health | grep "1 row"
-  ceph health | grep "2 racks"
-  ceph health | grep "4 hosts"
-  ceph health | grep "9 osds"
-  ceph osd unset noup
-  wait_for_healthy
-}
-
-test_mark_two_osds_same_host_down_with_classes() {
-  ceph osd set noup
-  ceph osd crush set-device-class ssd osd.0 osd.2 osd.4 osd.6 osd.8
-  ceph osd crush set-device-class hdd osd.1 osd.3 osd.5 osd.7 osd.9
-  ceph osd down osd.0 osd.1
-  ceph health detail
-  ceph health | grep "1 host"
-  ceph health | grep "2 osds"
-  ceph health detail | grep "osd.0"
-  ceph health detail | grep "osd.1"
-  ceph osd unset noup
-  wait_for_healthy
-}
-
-test_mark_two_osds_same_host_down
-test_mark_two_osds_same_rack_down
-test_mark_all_but_last_osds_down
-test_mark_two_osds_same_host_down_with_classes
-
-exit 0
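
For reference, below is a minimal sketch of the polling helper the deleted workunit relied on, rewritten with a bounded retry count so a cluster that never recovers fails the run instead of looping forever. The function name wait_for_healthy_bounded and the 300-iteration cap are illustrative assumptions and are not taken from the removed script; only the "ceph health | grep down" condition comes from it.

wait_for_healthy_bounded() {
  local tries=0
  # "ceph health" keeps mentioning "down" while any OSD is still marked down.
  while ceph health | grep down
  do
    tries=$((tries + 1))
    # 300 one-second retries (~5 minutes) is an assumed cap, not from the original.
    if [ "$tries" -ge 300 ]; then
      echo "cluster did not become healthy within ${tries} tries" >&2
      return 1
    fi
    sleep 1
  done
}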