# stor4nfv.git: src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/point-to-point-upgrade.yaml
meta:
- desc: |
   Run ceph on two nodes, using one of them as a client,
   with a separate client-only node.
   Use xfs beneath the osds.
   install ceph/luminous v12.2.2 point version
   run workload and upgrade-sequence in parallel
   install ceph/luminous latest version
   run workload and upgrade-sequence in parallel
   install ceph/-x version (luminous or master/mimic)
   run workload and upgrade-sequence in parallel
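# "-x" denotes the branch under test; it is substituted when the suite is
# scheduled (see the illustrative teuthology-suite invocation above tasks).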
overrides:
  ceph:
    log-whitelist:
    - reached quota
    - scrub
    - osd_map_max_advance
    - wrongly marked
    fs: xfs
    conf:
      mon:
        mon debug unsafe allow tier with nonempty snaps: true
        mon warn on pool no app: false
      osd:
        osd map max advance: 1000
        osd_class_load_list: "cephfs hello journal lock log numops rbd refcount replica_log rgw sdk statelog timeindex user version"
        osd_class_default_list: "cephfs hello journal lock log numops rbd refcount replica_log rgw sdk statelog timeindex user version"
      client:
        rgw_crypt_require_ssl: false
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
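# Three nodes: the first carries mon.a, mds.a, osd.0-2 and mgr.x; the second
# mon.b, mon.c, osd.3-5 and client.0; the third is client-only (client.1).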
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mon.c
  - osd.3
  - osd.4
  - osd.5
  - client.0
- - client.1
openstack:
- volumes: # attached to each instance
    count: 3
    size: 30 # GB
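# The task list below drives the point-to-point upgrade: install the v12.2.2
# point release, upgrade to the latest luminous, then upgrade to the -x branch
# under test, running a workload in parallel with each upgrade sequence.
# An illustrative way to schedule this suite (exact flags depend on the lab):
#   teuthology-suite --suite upgrade/luminous-x/point-to-point-x --ceph luminous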
tasks:
- print: "**** v12.2.2 about to install"
- install:
    tag: v12.2.2
    # the line below can be removed; it is left over from the jewel test
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v12.2.2 install"
- ceph:
   fs: xfs
   add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
   - workload
- print: "**** done workload"
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      branch: luminous
    mon.b:
      branch: luminous
    # Note that client.1 IS NOT upgraded at this point
- parallel:
   - workload_luminous
   - upgrade-sequence_luminous
- print: "**** done parallel luminous branch"
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    client.1:
      branch: luminous
- print: "**** done branch: luminous install.upgrade on client.1"
- install.upgrade:
    mon.a:
    mon.b:
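    # no explicit branch here, so the nodes holding mon.a and mon.b are
    # upgraded to the -x branch under test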
- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
- parallel:
   - workload_x
   - upgrade-sequence_x
- print: "**** done parallel -x branch"
- exec:
    osd.0:
      - ceph osd set-require-min-compat-client luminous
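# set-require-min-compat-client luminous stops pre-luminous clients from
# connecting; it can be verified with, e.g.,
# `ceph osd dump | grep min_compat_client` (illustrative check).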
# Run librados tests on the -x upgraded cluster
- install.upgrade:
    client.1:
- workunit:
    branch: luminous
    clients:
      client.1:
      - rados/test.sh
      - cls
- print: "**** done final test on -x cluster"
#######################
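# The top-level keys below are not tasks: they are named sub-configurations
# that the sequential/parallel entries above look up by name.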
workload:
   sequential:
   - workunit:
       clients:
         client.0:
           - suites/blogbench.sh
workload_luminous:
   full_sequential:
   - workunit:
       branch: luminous
       clients:
         client.1:
         - rados/test.sh
         - cls
   - print: "**** done rados/test.sh & cls workload_luminous"
   - sequential:
     - rgw: [client.0]
     - print: "**** done rgw workload_luminous"
     - s3tests:
         client.0:
           force-branch: ceph-luminous
           rgw_server: client.0
           scan_for_encryption_keys: false
     - print: "**** done s3tests workload_luminous"
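# Rolling restart for the luminous point upgrade: mds.a first, then each osd
# with a pause between restarts, then the mons.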
upgrade-sequence_luminous:
   sequential:
   - print: "**** done branch: luminous install.upgrade"
   - ceph.restart: [mds.a]
   - sleep:
       duration: 60
   - ceph.restart: [osd.0]
   - sleep:
       duration: 30
   - ceph.restart: [osd.1]
   - sleep:
       duration: 30
   - ceph.restart: [osd.2]
   - sleep:
       duration: 30
   - ceph.restart: [osd.3]
   - sleep:
       duration: 30
   - ceph.restart: [osd.4]
   - sleep:
       duration: 30
   - ceph.restart: [osd.5]
   - sleep:
       duration: 60
   - ceph.restart: [mon.a]
   - sleep:
       duration: 60
   - ceph.restart: [mon.b]
   - sleep:
       duration: 60
   - ceph.restart: [mon.c]
   - sleep:
       duration: 60
   - print: "**** done ceph.restart all luminous branch mds/osd/mon"
workload_x:
   sequential:
   - workunit:
       branch: luminous
       clients:
         client.1:
         - rados/test.sh
         - cls
   - print: "**** done rados/test.sh & cls workload_x NOT upgraded client"
   - workunit:
       branch: luminous
       clients:
         client.0:
         - rados/test.sh
         - cls
   - print: "**** done rados/test.sh & cls workload_x upgraded client"
   - rgw: [client.1]
   - print: "**** done rgw workload_x"
   - s3tests:
       client.1:
         force-branch: ceph-luminous
         rgw_server: client.1
         scan_for_encryption_keys: false
   - print: "**** done s3tests workload_x"
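# For the -x upgrade the mons are restarted right after the mds and before the
# osds. The final osd and mgr.x are restarted with wait-for-healthy: false,
# since health cannot settle until all daemons run the new version and
# require-osd-release is set below.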
upgrade-sequence_x:
   sequential:
   - ceph.restart: [mds.a]
   - sleep:
       duration: 60
   - ceph.restart: [mon.a]
   - sleep:
       duration: 60
   - ceph.restart: [mon.b]
   - sleep:
       duration: 60
   - ceph.restart: [mon.c]
   - sleep:
       duration: 60
   - ceph.restart: [osd.0]
   - sleep:
       duration: 30
   - ceph.restart: [osd.1]
   - sleep:
       duration: 30
   - ceph.restart: [osd.2]
   - sleep:
       duration: 30
   - ceph.restart: [osd.3]
   - sleep:
       duration: 30
   - ceph.restart: [osd.4]
   - sleep:
       duration: 30
   - ceph.restart:
       daemons: [osd.5]
       wait-for-healthy: false
       wait-for-up-osds: true
   - ceph.restart:
       daemons: [mgr.x]
       wait-for-healthy: false
   - exec:
       osd.0:
         - ceph osd require-osd-release luminous
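   # require-osd-release luminous may only be set once every osd runs luminous
   # or newer; the flag can be checked with, e.g.,
   # `ceph osd dump | grep require_osd_release` (illustrative check).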
   - ceph.healthy:
   - print: "**** done ceph.restart all -x branch mds/osd/mon"