X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=src%2Fceph%2Fqa%2Fsuites%2Frados%2Fbasic%2Fd-require-luminous%2Fat-end.yaml;fp=src%2Fceph%2Fqa%2Fsuites%2Frados%2Fbasic%2Fd-require-luminous%2Fat-end.yaml;h=ef998cc89143d7b453f04fb160ddcd932e6bc18f;hb=812ff6ca9fcd3e629e49d4328905f33eee8ca3f5;hp=0000000000000000000000000000000000000000;hpb=15280273faafb77777eab341909a3f495cf248d9;p=stor4nfv.git

diff --git a/src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml b/src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml
new file mode 100644
index 0000000..ef998cc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml
@@ -0,0 +1,33 @@
+# do not require luminous osds at mkfs time; only set flag at
+# the end of the test run, then do a final scrub (to convert any
+# legacy snapsets), and verify we are healthy.
+tasks:
+- full_sequential_finally:
+  - exec:
+      mon.a:
+        - ceph osd require-osd-release luminous
+        - ceph osd pool application enable base rados || true
+# make sure osds have latest map
+        - rados -p rbd bench 5 write -b 4096
+  - ceph.healthy:
+  - ceph.osd_scrub_pgs:
+      cluster: ceph
+  - exec:
+      mon.a:
+        - sleep 15
+        - ceph osd dump | grep purged_snapdirs
+        - ceph pg dump -f json-pretty
+        - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
+overrides:
+  ceph:
+    conf:
+      global:
+        mon debug no require luminous: true
+
+# setting luminous triggers peering, which *might* trigger health alerts
+    log-whitelist:
+      - overall HEALTH_
+      - \(PG_AVAILABILITY\)
+      - \(PG_DEGRADED\)
+  thrashosds:
+    chance_thrash_cluster_full: 0
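
For reference, the full_sequential_finally block above boils down to roughly the following shell sequence run on mon.a at the end of the job. This is only a sketch, not part of the diff: it assumes an admin client on the monitor host, the default rbd pool, and a "base" pool created earlier in the suite (hence the || true), and it stands in for the ceph.healthy and ceph.osd_scrub_pgs teuthology tasks with a plain health check, since those have no single-command shell equivalent.

    #!/bin/sh -ex
    # End-of-run conversion and verification, mirroring the exec steps in the YAML.

    # Require luminous (or newer) OSDs from this point on.
    ceph osd require-osd-release luminous
    # Tag the "base" pool with the rados application, if the workload created it.
    ceph osd pool application enable base rados || true
    # Generate a little write traffic so every OSD picks up the latest osdmap.
    rados -p rbd bench 5 write -b 4096

    # The real suite runs the ceph.healthy and ceph.osd_scrub_pgs tasks here;
    # a plain health check approximates them in this sketch.
    ceph health

    sleep 15
    # The final scrub should have purged all legacy snapdirs ...
    ceph osd dump | grep purged_snapdirs
    ceph pg dump -f json-pretty
    # ... and left no legacy SnapSets behind.
    ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'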