# stor4nfv.git: src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
meta:
- desc: |
   Run ceph on two nodes, using one of them as a client,
   with a separate client-only node.
   Use xfs beneath the osds.
   install ceph/jewel v10.2.0 point version
   run workload and upgrade-sequence in parallel
   install ceph/jewel latest version
   run workload and upgrade-sequence in parallel
   install ceph/-x version (jewel or kraken)
   run workload and upgrade-sequence in parallel
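# The log-whitelist entries below keep warnings that are expected while the
# cluster runs mixed versions (mgr not yet deployed, OSD/PG/cache health
# checks, scrub noise) from failing the run; the conf section pins the osd
# map settings this suite is exercised with.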
overrides:
  ceph:
    log-whitelist:
    - reached quota
    - scrub
    - osd_map_max_advance
    - wrongly marked
    - overall HEALTH_
    - \(MGR_DOWN\)
    - \(OSD_
    - \(PG_
    - \(CACHE_
    fs: xfs
    conf:
      global:
        mon warn on pool no app: false
      mon:
        mon debug unsafe allow tier with nonempty snaps: true
      osd:
        osd map max advance: 1000
        osd map cache size: 1100
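# Each top-level list under roles is one test node: the first runs mon.a,
# mds.a, osd.0-2 and (once created) mgr.x, the second runs mon.b, mon.c,
# osd.3-5 and client.0, and the third is the separate client-only host,
# client.1.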
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mon.c
  - osd.3
  - osd.4
  - osd.5
  - client.0
- - client.1
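# When the job is scheduled on OpenStack, each instance gets three 30 GB
# volumes, typically used to back that node's OSDs.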
openstack:
- volumes: # attached to each instance
    count: 3
    size: 30 # GB
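# Task flow: install the v10.2.0 tag and bootstrap the cluster, run a
# workload, upgrade to the latest jewel while workload_jewel and
# upgrade-sequence_jewel run in parallel, then upgrade to the -x version
# under test while workload_x and upgrade-sequence_x run in parallel, and
# finally rerun the librados tests. exclude_packages lists packages (such as
# ceph-mgr and libcephfs2) that the jewel builds being installed do not ship.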
tasks:
- print: "****  v10.2.0 about to install"
- install:
    tag: v10.2.0
    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v10.2.0 install"
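# Bootstrap the cluster on v10.2.0. Jewel has no ceph-mgr daemon, so mgr
# startup is skipped here; mgr.x is only created later, in
# upgrade-sequence_x.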
- ceph:
   fs: xfs
   skip_mgr_daemons: true
   add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
   - workload
- print: "**** done workload v10.2.0"
- install.upgrade:
    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      branch: jewel
    mon.b:
      branch: jewel
    # Note that client.1 IS NOT upgraded at this point
    #client.1:
      #branch: jewel
- parallel:
   - workload_jewel
   - upgrade-sequence_jewel
- print: "**** done parallel jewel branch"
- install.upgrade:
    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    client.1:
      branch: jewel
- print: "**** done branch: jewel install.upgrade on client.1"
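# No branch or tag is pinned below, so install.upgrade moves the mon.a and
# mon.b hosts to the -x version under test for this run.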
- install.upgrade:
    mon.a:
    mon.b:
- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
- parallel:
   - workload_x
   - upgrade-sequence_x
- print: "**** done parallel -x branch"
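# With every daemon now on -x, forbid clients older than luminous from
# connecting, then upgrade client.1 and rerun the jewel librados tests
# against the upgraded cluster.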
- exec:
    osd.0:
      - ceph osd set-require-min-compat-client luminous
# Run librados tests on the -x upgraded cluster
- install.upgrade:
    client.1:
- workunit:
    branch: jewel
    clients:
      client.1:
      - rados/test-upgrade-v11.0.0.sh
      - cls
- print: "**** done final test on -x cluster"
#######################
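# The sections below are not run directly; they are looked up by name by the
# sequential and parallel tasks above.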
workload:
   sequential:
   - workunit:
       clients:
         client.0:
           - suites/blogbench.sh
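# workload_jewel exercises the not-yet-upgraded client.1 with the jewel
# rados and cls workunits, then runs rgw and s3tests against client.0. The
# gtest filter skips the cls_rbd mirror_image case, presumably because it
# does not behave consistently across the versions involved here.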
workload_jewel:
   full_sequential:
   - workunit:
       branch: jewel
       clients:
         client.1:
         - rados/test.sh
         - cls
       env:
         CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
   - print: "**** done rados/test.sh &  cls workload_jewel"
   - sequential:
     - rgw: [client.0]
     - print: "**** done rgw workload_jewel"
     - s3tests:
         client.0:
           force-branch: ceph-jewel
           rgw_server: client.0
           scan_for_encryption_keys: false
     - print: "**** done s3tests workload_jewel"
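# upgrade-sequence_jewel restarts every daemon onto the newly installed
# jewel packages: the mds first, then each OSD, then each monitor, with a
# pause between restarts.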
upgrade-sequence_jewel:
   sequential:
   - print: "**** done branch: jewel install.upgrade"
   - ceph.restart: [mds.a]
   - sleep:
       duration: 60
   - ceph.restart: [osd.0]
   - sleep:
       duration: 30
   - ceph.restart: [osd.1]
   - sleep:
       duration: 30
   - ceph.restart: [osd.2]
   - sleep:
       duration: 30
   - ceph.restart: [osd.3]
   - sleep:
       duration: 30
   - ceph.restart: [osd.4]
   - sleep:
       duration: 30
   - ceph.restart: [osd.5]
   - sleep:
       duration: 60
   - ceph.restart: [mon.a]
   - sleep:
       duration: 60
   - ceph.restart: [mon.b]
   - sleep:
       duration: 60
   - ceph.restart: [mon.c]
   - sleep:
       duration: 60
   - print: "**** done ceph.restart all jewel branch mds/osd/mon"
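# workload_x runs the jewel upgrade test scripts (the no-EC variant) from
# client.1, which is still on jewel, and from client.0, whose node has been
# upgraded to -x, then runs rgw and s3tests on client.1.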
workload_x:
   sequential:
   - workunit:
       branch: jewel
       clients:
         client.1:
         - rados/test-upgrade-v11.0.0-noec.sh
         - cls
       env:
         CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
   - print: "**** done rados/test-upgrade-v11.0.0-noec.sh & cls workload_x NOT upgraded client"
   - workunit:
       branch: jewel
       clients:
         client.0:
         - rados/test-upgrade-v11.0.0-noec.sh
         - cls
   - print: "**** done rados/test-upgrade-v11.0.0-noec.sh & cls workload_x upgraded client"
   - rgw: [client.1]
   - print: "**** done rgw workload_x"
   - s3tests:
       client.1:
         force-branch: ceph-jewel
         rgw_server: client.1
         scan_for_encryption_keys: false
   - print: "**** done s3tests workload_x"
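# upgrade-sequence_x restarts the daemons onto the -x packages (mds, then
# mons, then OSDs). The last OSD restart does not wait for HEALTH_OK,
# presumably because health warnings persist until mgr.x exists and
# require-osd-release is set; the exec steps then create a keyring for
# mgr.x, start it, and finish the upgrade with "ceph osd require-osd-release
# luminous" before the final health check.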
upgrade-sequence_x:
   sequential:
   - ceph.restart: [mds.a]
   - sleep:
       duration: 60
   - ceph.restart: [mon.a]
   - sleep:
       duration: 60
   - ceph.restart: [mon.b]
   - sleep:
       duration: 60
   - ceph.restart: [mon.c]
   - sleep:
       duration: 60
   - ceph.restart: [osd.0]
   - sleep:
       duration: 30
   - ceph.restart: [osd.1]
   - sleep:
       duration: 30
   - ceph.restart: [osd.2]
   - sleep:
       duration: 30
   - ceph.restart: [osd.3]
   - sleep:
       duration: 30
   - ceph.restart: [osd.4]
   - sleep:
       duration: 30
   - ceph.restart:
       daemons: [osd.5]
       wait-for-healthy: false
       wait-for-up-osds: true
   - exec:
      mgr.x:
        - mkdir -p /var/lib/ceph/mgr/ceph-x
        - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
        - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
   - ceph.restart:
       daemons: [mgr.x]
       wait-for-healthy: false
   - exec:
       osd.0:
         - ceph osd require-osd-release luminous
   - ceph.healthy:
   - print: "**** done ceph.restart all -x branch mds/osd/mon"