diff --git a/src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml b/src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml
new file mode 100644 (file)
index 0000000..ef998cc
--- /dev/null
@@ -0,0 +1,46 @@
+# do not require luminous osds at mkfs time; only set the flag at
+# the end of the test run, then do a final scrub (to convert any
+# legacy snapsets) and verify that we are healthy.
+tasks:
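+# full_sequential_finally runs the tasks below, in order, at the very
+# end of the run, even if an earlier task failed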
+- full_sequential_finally:
+  - exec:
+      mon.a:
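+# require-osd-release permanently locks out pre-luminous osds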
+        - ceph osd require-osd-release luminous
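+# luminous warns about pools that have no application tag; the base
+# pool may not exist in every facet, hence the || true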
+        - ceph osd pool application enable base rados || true
+# make sure the osds have the latest map
+        - rados -p rbd bench 5 write -b 4096
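+# wait for HEALTH_OK, then scrub every pg so any remaining legacy
+# snapsets get converted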
+  - ceph.healthy:
+  - ceph.osd_scrub_pgs:
+      cluster: ceph
+  - exec:
+      mon.a:
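+# give pg stats a moment to settle, then verify the conversion: the
+# osdmap should carry the purged_snapdirs flag and the summed
+# num_legacy_snapsets counter should read 0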
+        - sleep 15
+        - ceph osd dump | grep purged_snapdirs
+        - ceph pg dump -f json-pretty
+        - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
+overrides:
+  ceph:
+    conf:
+      global:
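+# this is the knob that skips the luminous requirement at mkfs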
+        mon debug no require luminous: true
+
+# setting luminous triggers peering, which *might* trigger health alerts
+    log-whitelist:
+      - overall HEALTH_
+      - \(PG_AVAILABILITY\)
+      - \(PG_DEGRADED\)
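+# leave the cluster full flag alone while thrashing; full handling
+# presumably interacts badly with flipping the luminous flag mid-run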
+  thrashosds:
+    chance_thrash_cluster_full: 0