3 Run ceph on two nodes, using one of them as a client,
4 with a separate client-only node.
5 Use xfs beneath the osds.
6 install ceph/luminous v12.2.2 point version
7 run workload and upgrade-sequence in parallel
8 install ceph/luminous latest version
9 run workload and upgrade-sequence in parallel
10 install ceph/-x version (luminous or master/mimic)
11 run workload and upgrade-sequence in parallel
22 mon debug unsafe allow tier with nonempty snaps: true
23 mon warn on pool no app: false
25 osd map max advance: 1000
26 osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
27 replica_log rgw sdk statelog timeindex user version"
28 osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
29 replica_log rgw sdk statelog timeindex user version"
31 rgw_crypt_require_ssl: false
32 rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
48 - volumes: # attached to each instance
52 - print: "**** v12.2.2 about to install"
55 # line below can be removed; it's left over from the jewel test
56 #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
57 - print: "**** done v12.2.2 install"
60 add_osds_to_crush: true
61 - print: "**** done ceph xfs"
64 - print: "**** done workload"
66 #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
71 # Note that client.a IS NOT upgraded at this point
74 - upgrade-sequence_luminous
75 - print: "**** done parallel luminous branch"
77 #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
80 - print: "**** done branch: luminous install.upgrade on client.1"
84 - print: "**** done branch: -x install.upgrade on mon.a and mon.b"
88 - print: "**** done parallel -x branch"
91 - ceph osd set-require-min-compat-client luminous
92 # Run librados tests on the -x upgraded cluster
101 - print: "**** done final test on -x cluster"
102 #######################
108 - suites/blogbench.sh
117 - print: "**** done rados/test.sh & cls workload_luminous"
120 - print: "**** done rgw workload_luminous"
123 force-branch: ceph-luminous
125 scan_for_encryption_keys: false
126 - print: "**** done s3tests workload_luminous"
127 upgrade-sequence_luminous:
129 - print: "**** done branch: luminous install.upgrade"
130 - ceph.restart: [mds.a]
133 - ceph.restart: [osd.0]
136 - ceph.restart: [osd.1]
139 - ceph.restart: [osd.2]
142 - ceph.restart: [osd.3]
145 - ceph.restart: [osd.4]
148 - ceph.restart: [osd.5]
151 - ceph.restart: [mon.a]
154 - ceph.restart: [mon.b]
157 - ceph.restart: [mon.c]
160 - print: "**** done ceph.restart all luminous branch mds/osd/mon"
169 - print: "**** done rados/test.sh & cls workload_x NOT upgraded client"
176 - print: "**** done rados/test.sh & cls workload_x upgraded client"
178 - print: "**** done rgw workload_x"
181 force-branch: ceph-luminous
183 scan_for_encryption_keys: false
184 - print: "**** done s3tests workload_x"
187 - ceph.restart: [mds.a]
190 - ceph.restart: [mon.a]
193 - ceph.restart: [mon.b]
196 - ceph.restart: [mon.c]
199 - ceph.restart: [osd.0]
202 - ceph.restart: [osd.1]
205 - ceph.restart: [osd.2]
208 - ceph.restart: [osd.3]
211 - ceph.restart: [osd.4]
216 wait-for-healthy: false
217 wait-for-up-osds: true
220 wait-for-healthy: false
223 - ceph osd require-osd-release luminous
225 - print: "**** done ceph.restart all -x branch mds/osd/mon"