Run ceph on two nodes, using one of them as a client,
with a separate client-only node.
Use xfs beneath the osds.
install ceph/jewel v10.2.0 point version
run workload and upgrade-sequence in parallel
install ceph/jewel latest version
run workload and upgrade-sequence in parallel
install ceph/-x version (jewel or kraken)
run workload and upgrade-sequence in parallel
mon warn on pool no app: false
mon debug unsafe allow tier with nonempty snaps: true
osd map max advance: 1000
osd map cache size: 1100
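# A note on these overrides (my reading of their intent): the pool-app option
# silences the health warning newer mons raise for pools the jewel-era tests
# create without an application tag; the tiering debug flag lets the cache-tier
# tests add a tier over a pool that already holds snaps, which is normally
# refused as unsafe; and osd map max advance (1000) is kept just under the map
# cache size (1100) so restarted osds can catch up on missed epochs from cache.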
- volumes: # attached to each instance
- print: "**** v10.2.0 about to install"
exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
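# The excluded packages postdate jewel (ceph-mgr and libcephfs2 shipped in
# later releases), so asking the installer for them on a v10.2.0 node would
# fail; librgw2 is excluded only for this install, presumably because it
# appeared in a later jewel point release.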
- print: "**** done v10.2.0 install"
skip_mgr_daemons: true
add_osds_to_crush: true
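# jewel has no ceph-mgr daemon, so the ceph task is told not to expect one;
# add_osds_to_crush has the task register the osds in the crush map itself,
# as a jewel deployment would have done.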
- print: "**** done ceph xfs"
- print: "**** done workload v10.2.0"
exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
# Note that client.1 IS NOT upgraded at this point
- upgrade-sequence_jewel
- print: "**** done parallel jewel branch"
exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
- print: "**** done branch: jewel install.upgrade on client.1"
- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
- print: "**** done parallel -x branch"
- ceph osd set-require-min-compat-client luminous
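# With every daemon now on the -x release, refuse connections from clients
# older than luminous; this must only run once the whole cluster is upgraded.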
# Run librados tests on the -x upgraded cluster
- rados/test-upgrade-v11.0.0.sh
- print: "**** done final test on -x cluster"
#######################
- suites/blogbench.sh
CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
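# TestClsRbd.mirror_image is filtered out of the jewel cls run, presumably
# because the mirror-image cls behavior is not stable across the version mix.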
- print: "**** done rados/test.sh & cls workload_jewel"
- print: "**** done rgw workload_jewel"
force-branch: ceph-jewel
scan_for_encryption_keys: false
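# The jewel-branch s3tests predate rgw server-side encryption, so scanning
# for encryption keys is disabled for this run.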
- print: "**** done s3tests workload_jewel"
upgrade-sequence_jewel:
- print: "**** done branch: jewel install.upgrade"
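# Restart order for the jewel point upgrade: the mds first, then the osds
# one by one, then the mons one at a time so quorum is never lost.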
- ceph.restart: [mds.a]
- ceph.restart: [osd.0]
- ceph.restart: [osd.1]
- ceph.restart: [osd.2]
- ceph.restart: [osd.3]
- ceph.restart: [osd.4]
- ceph.restart: [osd.5]
- ceph.restart: [mon.a]
- ceph.restart: [mon.b]
- ceph.restart: [mon.c]
- print: "**** done ceph.restart all jewel branch mds/osd/mon"
- rados/test-upgrade-v11.0.0-noec.sh
CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
- print: "**** done rados/test-upgrade-v11.0.0-noec.sh & cls workload_x NOT upgraded client"
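# The -noec variant of the upgrade test suite leaves out the erasure-coding
# cases, presumably because they cannot run under this version mix; both this
# and the following workunit use it.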
- rados/test-upgrade-v11.0.0-noec.sh
- print: "**** done rados/test-upgrade-v11.0.0-noec.sh & cls workload_x upgraded client"
- print: "**** done rgw workload_x"
force-branch: ceph-jewel
scan_for_encryption_keys: false
- print: "**** done s3tests workload_x"
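# The -x upgrade-sequence below restarts the mds first, then the mons one at
# a time (keeping quorum), and finally the osds.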
- ceph.restart: [mds.a]
- ceph.restart: [mon.a]
- ceph.restart: [mon.b]
- ceph.restart: [mon.c]
- ceph.restart: [osd.0]
- ceph.restart: [osd.1]
- ceph.restart: [osd.2]
- ceph.restart: [osd.3]
- ceph.restart: [osd.4]
wait-for-healthy: false
wait-for-up-osds: true
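# The cluster cannot report healthy at this point (no mgr is running yet and
# require-osd-release has not been set), so the restart only waits for the
# osds to come back up.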
- mkdir -p /var/lib/ceph/mgr/ceph-x
- ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
- ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
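# Because the cluster was deployed with skip_mgr_daemons, mgr.x has no data
# dir or keyring yet: create the directory, generate the mgr key, and export
# it to the path the daemon expects before starting it.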
wait-for-healthy: false
- ceph osd require-osd-release luminous
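# Tells the mons that every osd now runs luminous, which finalizes the
# upgrade and lets the cluster reach a healthy state.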
- print: "**** done ceph.restart all -x branch mds/osd/mon"