remove ceph code
src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml (stor4nfv.git)
diff --git a/src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml b/src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml
deleted file mode 100644
index 376bf08..0000000
--- a/src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-tasks:
-- exec:
-    client.0:
-      - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
-      - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
-      - sudo ceph osd pool set datapool allow_ec_overwrites true
-      - rbd pool init datapool
-
-overrides:
-  thrashosds:
-    bdev_inject_crash: 2
-    bdev_inject_crash_probability: .5
-  ceph:
-    fs: xfs
-    log-whitelist:
-      - overall HEALTH_
-      - \(CACHE_POOL_NO_HIT_SET\)
-    conf:
-      client:
-        rbd default data pool: datapool
-      osd: # force bluestore since it's required for ec overwrites
-        osd objectstore: bluestore
-        bluestore block size: 96636764160
-        enable experimental unrecoverable data corrupting features: "*"
-        osd debug randomize hobject sort order: false
-# this doesn't work with failures because the log writes are not atomic across the two backends
-#        bluestore bluefs env mirror: true
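
For reference, the removed fragment created an erasure-coded data pool and
pointed RBD clients at it via "rbd default data pool". A minimal manual
equivalent on a running cluster is sketched below; the image name "testimg"
and the per-image --data-pool usage are illustrative, everything else is
taken verbatim from the deleted file:

    # 2+1 EC profile (k=2 data, m=1 coding) with per-OSD failure domain, as in the test
    sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
    sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
    # EC overwrites require bluestore OSDs, hence the osd objectstore override above
    sudo ceph osd pool set datapool allow_ec_overwrites true
    rbd pool init datapool
    # per-image alternative to the client-side "rbd default data pool" option
    rbd create --size 1G --data-pool datapool rbd/testimg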