diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
new file mode 100644 (file)
index 0000000..b2fc171
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
@@ -0,0 +1,64 @@
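+# Run a cache-tiering rados workload in parallel with the upgrade of
+# the OSDs and monitors from hammer to jewel.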
+tasks:
+- parallel:
+   - workload
+   - upgrade-sequence
+- print: "**** done parallel"
+
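+# The workload drives reads, writes, and cache-tier ops (flush,
+# try_flush, evict) against base-pool for the duration of the upgrade.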
+workload:
+  sequential:
+  - rados:
+      clients: [client.0]
+      pools: [base-pool]
+      ops: 4000
+      objects: 500
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        copy_from: 50
+        cache_flush: 50
+        cache_try_flush: 50
+        cache_evict: 50
+  - print: "**** done rados"
+
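+# Upgrade the packages, restart the daemons, set the flags jewel
+# requires, and only then wait for the cluster to go healthy.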
+upgrade-sequence:
+  sequential:
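+  # ceph-mgr and the libcephfs2 packages are not built for jewel, so
+  # keep them out of the package upgrade on the osd.0 and osd.2 hosts.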
+  - install.upgrade:
+      exclude_packages:
+        - ceph-mgr
+        - libcephfs2
+        - libcephfs-devel
+        - libcephfs-dev
+      osd.0:
+        branch: jewel
+      osd.2:
+        branch: jewel
+  - print: "*** done install.upgrade osd.0 and osd.2"
+  - ceph.restart:
+      daemons: [osd.0, osd.1, osd.2, osd.3]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - ceph.restart:
+      daemons: [mon.a, mon.b, mon.c]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - print: "**** done ceph.restart do not wait for healthy"
+  - exec:
+      mon.a:
+        - sleep 300 # http://tracker.ceph.com/issues/17808
+        - ceph osd set sortbitwise
+        - ceph osd set require_jewel_osds
+  - ceph.healthy:
+  - print: "**** done ceph.healthy"