diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
new file mode 100644 (file)
index 0000000..d68c258
--- /dev/null
@@ -0,0 +1,236 @@
+meta:
+- desc: |
+   Run ceph on two nodes, using one of them as a client,
+   with a separate client-only node. 
+   Use xfs beneath the osds.
+   install ceph/jewel v10.2.0 point version
+   run workload and upgrade-sequence in parallel
+   install ceph/jewel latest version
+   run workload and upgrade-sequence in parallel
+   install ceph/-x version (the branch under test, luminous or later)
+   run workload and upgrade-sequence in parallel
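+# The whitelist below covers log messages and health warnings that are
+# expected during the staged upgrade and the test workloads (e.g. MGR_DOWN
+# before mgr.x is bootstrapped), so teuthology does not fail the run on them.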
+overrides:
+  ceph:
+    log-whitelist:
+    - reached quota
+    - scrub
+    - osd_map_max_advance
+    - wrongly marked
+    - overall HEALTH_
+    - \(MGR_DOWN\)
+    - \(OSD_
+    - \(PG_
+    - \(CACHE_
+    fs: xfs
+    conf:
+      global:
+        mon warn on pool no app: false
+      mon:
+        mon debug unsafe allow tier with nonempty snaps: true
+      osd:
+        osd map max advance: 1000
+        osd map cache size: 1100
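+# Three nodes, matching the meta description: two cluster nodes (the second
+# also hosts client.0) and a separate client-only node for client.1.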
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - mgr.x
+- - mon.b
+  - mon.c
+  - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 30 # GB
+tasks:
+- print: "****  v10.2.0 about to install"
+- install:
+    tag: v10.2.0
+    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
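+    # Packages such as ceph-mgr and libcephfs2 are not shipped for jewel, so
+    # they are excluded to keep the v10.2.0 install from failing on them.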
+- print: "**** done v10.2.0 install"
+- ceph:
+   fs: xfs
+   skip_mgr_daemons: true
+   add_osds_to_crush: true
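+   # jewel ships no ceph-mgr, so mgr daemon setup is skipped for now; mgr.x is
+   # only created and started at the end of upgrade-sequence_x below.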
+- print: "**** done ceph xfs"
+- sequential:
+   - workload
+- print: "**** done workload v10.2.0"
+- install.upgrade:
+    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+    mon.a:
+      branch: jewel
+    mon.b:
+      branch: jewel
+    # Note that client.1 IS NOT upgraded at this point
+    #client.1:
+      #branch: jewel
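+# parallel runs the named sections defined at the bottom of this file
+# concurrently, so the jewel workload keeps exercising the cluster while
+# upgrade-sequence_jewel restarts the daemons.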
+- parallel:
+   - workload_jewel
+   - upgrade-sequence_jewel
+- print: "**** done parallel jewel branch"
+- install.upgrade:
+    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+    client.1:
+      branch: jewel
+- print: "**** done branch: jewel install.upgrade on client.1"
+- install.upgrade:
+    mon.a:
+    mon.b:
+- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
+- parallel:
+   - workload_x
+   - upgrade-sequence_x
+- print: "**** done parallel -x branch"
+- exec:
+    osd.0:
+      - ceph osd set-require-min-compat-client luminous
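+# From here on, pre-luminous clients are refused; presumably set now so the
+# final librados tests below run against the fully upgraded cluster.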
+# Run librados tests on the -x upgraded cluster
+- install.upgrade:
+    client.1:
+- workunit:
+    branch: jewel
+    clients:
+      client.1:
+      - rados/test-upgrade-v11.0.0.sh
+      - cls
+- print: "**** done final test on -x cluster"
+#######################
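+# The sections below are not run directly; the sequential:/parallel: tasks
+# above pull them in by name.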
+workload:
+   sequential:
+   - workunit:
+       clients:
+         client.0:
+           - suites/blogbench.sh
+workload_jewel:
+   full_sequential:
+   - workunit:
+       branch: jewel
+       clients:
+         client.1:
+         - rados/test.sh
+         - cls
+       env:
+         CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
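+         # TestClsRbd.mirror_image is filtered out above, presumably because
+         # the jewel-era test is not expected to pass against upgraded OSDs.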
+   - print: "**** done rados/test.sh &  cls workload_jewel"
+   - sequential:
+     - rgw: [client.0]
+     - print: "**** done rgw workload_jewel"
+     - s3tests:
+         client.0:
+           force-branch: ceph-jewel
+           rgw_server: client.0
+           scan_for_encryption_keys: false
+     - print: "**** done s3tests workload_jewel"
+upgrade-sequence_jewel:
+   sequential:
+   - print: "**** done branch: jewel install.upgrade"
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [osd.0]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.1]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.2]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.3]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.4]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.5]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.b]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.c]
+   - sleep:
+       duration: 60
+   - print: "**** done ceph.restart all jewel branch mds/osd/mon"
+workload_x:
+   sequential:
+   - workunit:
+       branch: jewel
+       clients:
+         client.1:
+         - rados/test-upgrade-v11.0.0-noec.sh
+         - cls
+       env:
+         CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
+   - print: "**** done rados/test-upgrade-v11.0.0.sh &  cls workload_x NOT upgraded  client"
+   - workunit:
+       branch: jewel
+       clients:
+         client.0:
+         - rados/test-upgrade-v11.0.0-noec.sh
+         - cls
+   - print: "**** done rados/test-upgrade-v11.0.0.sh &  cls workload_x upgraded client"
+   - rgw: [client.1]
+   - print: "**** done rgw workload_x"
+   - s3tests:
+       client.1:
+         force-branch: ceph-jewel
+         rgw_server: client.1
+         scan_for_encryption_keys: false
+   - print: "**** done s3tests workload_x"
+upgrade-sequence_x:
+   sequential:
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.a]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.b]
+   - sleep:
+       duration: 60
+   - ceph.restart: [mon.c]
+   - sleep:
+       duration: 60
+   - ceph.restart: [osd.0]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.1]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.2]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.3]
+   - sleep:
+       duration: 30
+   - ceph.restart: [osd.4]
+   - sleep:
+       duration: 30
+   - ceph.restart:
+       daemons: [osd.5]
+       wait-for-healthy: false
+       wait-for-up-osds: true
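+   # osd.5 is restarted without waiting for HEALTH_OK, presumably because the
+   # cluster keeps reporting MGR_DOWN until mgr.x below is created and started.
+   # The mgr keyring is created by hand since the cluster was deployed on
+   # jewel, which never provisioned any mgr daemons.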
+   - exec:
+       mgr.x:
+         - mkdir -p /var/lib/ceph/mgr/ceph-x
+         - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
+         - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
+   - ceph.restart:
+       daemons: [mgr.x]
+       wait-for-healthy: false
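+   # require-osd-release luminous records that every OSD now runs luminous and
+   # permits luminous-only OSD features; ceph.healthy then confirms the
+   # cluster settles after the full upgrade.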
+   - exec:
+       osd.0:
+         - ceph osd require-osd-release luminous
+   - ceph.healthy:
+   - print: "**** done ceph.restart all -x branch mds/osd/mon"