src/ceph/qa/workunits/ceph-disk/ceph-disk-test.py
#
# Copyright (C) 2015, 2016 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
# When debugging these tests (must be root), here are a few useful commands:
#
#  export PATH=.:..:$PATH
#  ceph-disk.sh # run once to prepare the environment as it would be by teuthology
#  ln -sf /home/ubuntu/ceph/src/ceph-disk/ceph_disk/main.py $(which ceph-disk)
#  ln -sf /home/ubuntu/ceph/udev/95-ceph-osd.rules /lib/udev/rules.d/95-ceph-osd.rules
#  ln -sf /home/ubuntu/ceph/systemd/ceph-disk@.service /usr/lib/systemd/system/ceph-disk@.service
#  ceph-disk.conf will be silently ignored if it is a symbolic link or a hard link; see /var/log/upstart for logs
#  cp /home/ubuntu/ceph/src/upstart/ceph-disk.conf /etc/init/ceph-disk.conf
#  id=3 ; ceph-disk deactivate --deactivate-by-id $id ; ceph-disk destroy --purge --zap --destroy-by-id $id
#  py.test -s -v -k test_activate_dmcrypt_luks ceph-disk-test.py
#
#  CentOS 7
#    udevadm monitor --property & tail -f /var/log/messages
#    udev rules messages are logged in /var/log/messages
#    systemctl stop ceph-osd@2
#    systemctl start ceph-osd@2
#
#  udevadm monitor --property & tail -f /var/log/syslog /var/log/upstart/*  # on Ubuntu 14.04
#  udevadm test --action=add /block/vdb/vdb1 # verify the udev rule is run as expected
#  udevadm control --reload # when changing the udev rules
#  sudo /usr/sbin/ceph-disk -v trigger /dev/vdb1 # activates if vdb1 is data
#
#  integration tests coverage
#  pip install coverage
#  perl -pi -e 's|"ceph-disk |"coverage run --source=/usr/sbin/ceph-disk --append /usr/sbin/ceph-disk |' ceph-disk-test.py
#  rm -f .coverage ; py.test -s -v ceph-disk-test.py
#  coverage report --show-missing
#
import argparse
import json
import logging
import configobj
import os
import pytest
import re
import subprocess
import sys
import tempfile
import time
import uuid

LOG = logging.getLogger('CephDisk')


class CephDisk:

    def __init__(self):
        self.conf = configobj.ConfigObj('/etc/ceph/ceph.conf')

    def save_conf(self):
        self.conf.write(open('/etc/ceph/ceph.conf', 'wb'))

    @staticmethod
    def helper(command):
        command = "ceph-helpers-root.sh " + command
        return CephDisk.sh(command)

    @staticmethod
    def sh(command):
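        """Run command in a shell with stderr merged into stdout, log each
        output line, and return the output joined without newlines
        ('dangerous and experimental' warning lines are filtered out);
        raise CalledProcessError on a non-zero exit status."""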
        LOG.debug(":sh: " + command)
        proc = subprocess.Popen(
            args=command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True,
            bufsize=1)
        output, _ = proc.communicate()
        if proc.poll():
            LOG.warning(output.decode('utf-8'))
            raise subprocess.CalledProcessError(
                returncode=proc.returncode,
                cmd=command,
                output=output,
            )
        lines = []
        for line in output.decode('utf-8').split('\n'):
            if 'dangerous and experimental' in line:
                LOG.debug('SKIP dangerous and experimental')
                continue
            lines.append(line)
            LOG.debug(line.strip().encode('ascii', 'ignore'))
        return "".join(lines)

    def unused_disks(self, pattern='[vs]d.'):
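        """Return the paths of the disks matching pattern that carry no
        partitions, according to 'ceph-disk list --format json'."""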
        names = [x for x in os.listdir("/sys/block") if re.match(pattern, x)]
        if not names:
            return []
        disks = json.loads(
            self.sh("ceph-disk list --format json " + " ".join(names)))
        unused = []
        for disk in disks:
            if 'partitions' not in disk:
                unused.append(disk['path'])
        return unused

    def ensure_sd(self):
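        """Make sure at least one unused sd* disk is available, loading the
        scsi_debug module to create a fake 300MB disk if necessary."""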
        LOG.debug(self.unused_disks('sd.'))
        if self.unused_disks('sd.'):
            return
        modprobe = "modprobe scsi_debug vpd_use_hostno=0 add_host=1 dev_size_mb=300 ; udevadm settle"
        try:
            self.sh(modprobe)
        except Exception:
            self.helper("install linux-image-extra-3.13.0-61-generic")
            self.sh(modprobe)

    def unload_scsi_debug(self):
        self.sh("rmmod scsi_debug || true")

    def get_lockbox(self):
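        """Return the first partition of type 'lockbox' reported by
        'ceph-disk list', raising if none is found."""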
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition.get('type') == 'lockbox':
                        return partition
        raise Exception("no lockbox found " + str(disks))

    def get_osd_partition(self, uuid):
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition.get('uuid') == uuid:
                        return partition
        raise Exception("uuid = " + uuid + " not found in " + str(disks))

    def get_journal_partition(self, uuid):
        return self.get_space_partition('journal', uuid)

    def get_block_partition(self, uuid):
        return self.get_space_partition('block', uuid)

    def get_blockdb_partition(self, uuid):
        return self.get_space_partition('block.db', uuid)

    def get_blockwal_partition(self, uuid):
        return self.get_space_partition('block.wal', uuid)

    def get_space_partition(self, name, uuid):
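        """Return the journal, block, block.db or block.wal partition
        (selected by name) backing the OSD whose data partition carries
        uuid, asserting that its <name>_for field points back to that
        data partition when present."""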
        data_partition = self.get_osd_partition(uuid)
        space_dev = data_partition[name + '_dev']
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition['path'] == space_dev:
                        if name + '_for' in partition:
                            assert partition[
                                name + '_for'] == data_partition['path']
                        return partition
        raise Exception(
            name + " for uuid = " + uuid + " not found in " + str(disks))

    def destroy_osd(self, uuid):
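        """Resolve uuid to an OSD id ('ceph osd create' returns the existing
        id for a known uuid), then deactivate and destroy that OSD, purging
        it from the cluster and zapping its disk."""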
        id = self.sh("ceph osd create " + uuid).strip()
        self.sh("""
        set -xe
        ceph-disk --verbose deactivate --deactivate-by-id {id}
        ceph-disk --verbose destroy --purge --destroy-by-id {id} --zap
        """.format(id=id))

    def deactivate_osd(self, uuid):
        id = self.sh("ceph osd create " + uuid).strip()
        self.sh("""
        set -xe
        ceph-disk --verbose deactivate --once --deactivate-by-id {id}
        """.format(id=id))

    @staticmethod
    def osd_up_predicate(osds, uuid):
        for osd in osds:
            if osd['uuid'] == uuid and 'up' in osd['state']:
                return True
        return False

    @staticmethod
    def wait_for_osd_up(uuid):
        CephDisk.wait_for_osd(uuid, CephDisk.osd_up_predicate, 'up')

    @staticmethod
    def osd_down_predicate(osds, uuid):
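        """True if the OSD with uuid is marked down (or its state is only
        'exists'), or if it no longer appears in the osd dump at all."""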
        found = False
        for osd in osds:
            if osd['uuid'] == uuid:
                found = True
                if 'down' in osd['state'] or ['exists'] == osd['state']:
                    return True
        return not found

    @staticmethod
    def wait_for_osd_down(uuid):
        CephDisk.wait_for_osd(uuid, CephDisk.osd_down_predicate, 'down')

    @staticmethod
    def wait_for_osd(uuid, predicate, info):
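        """Poll 'ceph osd dump' with exponential backoff (1s to 1024s,
        roughly 34 minutes of sleep in total) until predicate(osds, uuid)
        is true; raise an Exception on timeout."""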
        LOG.info("wait_for_osd " + info + " " + uuid)
        for delay in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
            dump = json.loads(CephDisk.sh("ceph osd dump -f json"))
            if predicate(dump['osds'], uuid):
                return True
            time.sleep(delay)
        raise Exception('timeout waiting for osd ' + uuid + ' to be ' + info)

    def check_osd_status(self, uuid, space_name=None):
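        """Assert that the data partition of the OSD identified by uuid is
        active and, if space_name is given, that the corresponding journal,
        block, block.db or block.wal partition exists."""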
        data_partition = self.get_osd_partition(uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        if space_name is not None:
            space_partition = self.get_space_partition(space_name, uuid)
            assert space_partition


class TestCephDisk(object):

    def setup_class(self):
        logging.basicConfig(level=logging.DEBUG)
        c = CephDisk()
        if c.sh("lsb_release -si").strip() == 'CentOS':
            c.helper("install multipath-tools device-mapper-multipath")
        c.conf['global']['pid file'] = '/var/run/ceph/$cluster-$name.pid'
        #
        # Avoid json parsing interference
        #
        c.conf['global']['debug monc'] = 0
        #
        # objectstore
        #
        c.conf['global']['osd journal size'] = 100
        #
        # bluestore
        #
        c.conf['global']['bluestore fsck on mount'] = 'true'
        c.save_conf()

    def setup(self):
        c = CephDisk()
        for key in ('osd objectstore', 'osd dmcrypt type'):
            if key in c.conf['global']:
                del c.conf['global'][key]
        c.save_conf()

    def test_deactivate_reactivate_osd(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        data_partition = c.get_osd_partition(osd_uuid)
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose activate " + data_partition['path'] + " --reactivate")
        # check again
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)

    def test_destroy_osd_by_id(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_destroy_osd_by_dev_path(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
        c.wait_for_osd_up(osd_uuid)
        partition = c.get_osd_partition(osd_uuid)
        assert partition['type'] == 'data'
        assert partition['state'] == 'active'
        c.sh("ceph-disk --verbose deactivate " + partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose destroy --purge " + partition['path'] + " --zap")

    def test_deactivate_reactivate_dmcrypt_plain(self):
        c = CephDisk()
        c.conf['global']['osd dmcrypt type'] = 'plain'
        c.save_conf()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        data_partition = c.get_osd_partition(osd_uuid)
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose activate-journal " + data_partition['journal_dev'] +
             " --reactivate" + " --dmcrypt")
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        c.destroy_osd(osd_uuid)
        c.save_conf()

    def test_deactivate_reactivate_dmcrypt_luks(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.sh("ceph-disk --verbose activate-journal " + data_partition['journal_dev'] +
             " --reactivate" + " --dmcrypt")
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        c.destroy_osd(osd_uuid)

    def test_activate_dmcrypt_plain_no_lockbox(self):
        c = CephDisk()
        c.conf['global']['osd dmcrypt type'] = 'plain'
        c.save_conf()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        c.destroy_osd(osd_uuid)
        c.save_conf()

    def test_activate_dmcrypt_luks_no_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        c.destroy_osd(osd_uuid)

    def test_activate_dmcrypt_luks_with_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        c.destroy_osd(osd_uuid)

    def test_activate_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        lockbox = c.get_lockbox()
        assert lockbox['state'] == 'active'
        c.sh("umount " + lockbox['path'])
        lockbox = c.get_lockbox()
        assert lockbox['state'] == 'prepared'
        c.sh("ceph-disk --verbose trigger " + lockbox['path'])
        success = False
        for delay in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
            lockbox = c.get_lockbox()
            if lockbox['state'] == 'active':
                success = True
                break
            time.sleep(delay)
        if not success:
            raise Exception('timeout waiting for lockbox ' + lockbox['path'])
        c.destroy_osd(osd_uuid)

    def activate_dmcrypt(self, ceph_disk):
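        """Zap an unused disk, prepare a dmcrypt filestore OSD on it with the
        given ceph-disk executable (ceph-disk or ceph-disk-no-lockbox), wait
        until the OSD is up and return its uuid."""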
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        journal_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh(ceph_disk + " --verbose prepare --filestore " +
             " --osd-uuid " + osd_uuid +
             " --journal-uuid " + journal_uuid +
             " --dmcrypt " +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        return osd_uuid

    def test_trigger_dmcrypt_journal_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        with pytest.raises(subprocess.CalledProcessError):
            # fails because the lockbox is not mounted yet
            c.sh("ceph-disk --verbose trigger --sync " + data_partition['journal_dev'])
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_trigger_dmcrypt_data_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        with pytest.raises(subprocess.CalledProcessError):
            # fails because the lockbox is not mounted yet
            c.sh("ceph-disk --verbose trigger --sync " + data_partition['path'])
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_trigger_dmcrypt_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_activate_no_journal(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.conf['global']['osd objectstore'] = 'memstore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 1
        partition = device['partitions'][0]
        assert partition['type'] == 'data'
        assert partition['state'] == 'active'
        assert 'journal_dev' not in partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.save_conf()

    def test_activate_with_journal_dev_no_symlink(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)

    def test_activate_bluestore(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'block')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + disk)

    def test_activate_bluestore_separated_block_db_wal(self):
        c = CephDisk()
        disk1 = c.unused_disks()[0]
        disk2 = c.unused_disks()[1]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
             " " + disk1 + " --block.db " + disk2 + " --block.wal " + disk2)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk1))[0]
        assert len(device['partitions']) == 2
        device = json.loads(c.sh("ceph-disk list --format json " + disk2))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'block')
        c.check_osd_status(osd_uuid, 'block.wal')
        c.check_osd_status(osd_uuid, 'block.db')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)

    def test_activate_bluestore_reuse_db_wal_partition(self):
        c = CephDisk()
        disks = c.unused_disks()
        block_disk = disks[0]
        db_wal_disk = disks[1]
        #
        # Create an OSD with two disks (one for block, the other for
        # block.db and block.wal) and then destroy the OSD.
        #
        osd_uuid1 = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
             osd_uuid1 + " " + block_disk + " --block.db " + db_wal_disk +
             " --block.wal " + db_wal_disk)
        c.wait_for_osd_up(osd_uuid1)
        blockdb_partition = c.get_blockdb_partition(osd_uuid1)
        blockdb_path = blockdb_partition['path']
        blockwal_partition = c.get_blockwal_partition(osd_uuid1)
        blockwal_path = blockwal_partition['path']
        c.destroy_osd(osd_uuid1)
        c.sh("ceph-disk --verbose zap " + block_disk)
        #
        # Create another OSD with the block.db and block.wal partition
        # of the previous OSD
        #
        osd_uuid2 = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
             osd_uuid2 + " " + block_disk + " --block.db " + blockdb_path +
             " --block.wal " + blockwal_path)
        c.wait_for_osd_up(osd_uuid2)
        device = json.loads(c.sh("ceph-disk list --format json " + block_disk))[0]
        assert len(device['partitions']) == 2
        device = json.loads(c.sh("ceph-disk list --format json " + db_wal_disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid2, 'block')
        c.check_osd_status(osd_uuid2, 'block.wal')
        c.check_osd_status(osd_uuid2, 'block.db')
        blockdb_partition = c.get_blockdb_partition(osd_uuid2)
        blockwal_partition = c.get_blockwal_partition(osd_uuid2)
        #
        # Verify the previous OSD partition has been reused
        #
        assert blockdb_partition['path'] == blockdb_path
        assert blockwal_partition['path'] == blockwal_path
        c.destroy_osd(osd_uuid2)
        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)

    def test_activate_with_journal_dev_is_symlink(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        tempdir = tempfile.mkdtemp()
        symlink = os.path.join(tempdir, 'osd')
        os.symlink(disk, symlink)
        c.sh("ceph-disk --verbose zap " + symlink)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + symlink)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + symlink))[0]
        assert len(device['partitions']) == 2
        data_partition = c.get_osd_partition(osd_uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        journal_partition = c.get_journal_partition(osd_uuid)
        assert journal_partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + symlink)
        os.unlink(symlink)
        os.rmdir(tempdir)

    def test_activate_journal_file(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        #
        # /var/lib/ceph/osd is required otherwise it may violate
        # restrictions enforced by systemd regarding the directories
        # which ceph-osd is allowed to read/write
        #
        tempdir = tempfile.mkdtemp(dir='/var/lib/ceph/osd')
        c.sh("chown ceph:ceph " + tempdir + " || true")
        journal_file = os.path.join(tempdir, 'journal')
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_file)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        partition = device['partitions'][0]
        assert journal_file == os.readlink(
            os.path.join(partition['mount'], 'journal'))
        c.check_osd_status(osd_uuid)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk)
        os.unlink(journal_file)
        os.rmdir(tempdir)

    def test_activate_separated_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)

    def test_activate_separated_journal_dev_is_symlink(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        tempdir = tempfile.mkdtemp()
        data_symlink = os.path.join(tempdir, 'osd')
        os.symlink(data_disk, data_symlink)
        journal_symlink = os.path.join(tempdir, 'journal')
        os.symlink(journal_disk, journal_symlink)
        osd_uuid = self.activate_separated_journal(
            data_symlink, journal_symlink)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_symlink + " " + journal_symlink)
        os.unlink(data_symlink)
        os.unlink(journal_symlink)
        os.rmdir(tempdir)

    def activate_separated_journal(self, data_disk, journal_disk):
        c = CephDisk()
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        c.check_osd_status(osd_uuid, 'journal')
        return osd_uuid

    #
    # Create an OSD and get a journal partition from a disk that
    # already contains a journal partition which is in use. Updates of
    # the kernel partition table may behave differently when a
    # partition is in use. See http://tracker.ceph.com/issues/7334 for
    # more information.
    #
    def test_activate_two_separated_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        other_data_disk = disks[1]
        journal_disk = disks[2]
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        other_osd_uuid = self.activate_separated_journal(
            other_data_disk, journal_disk)
        #
        # read/write can only succeed if the two osds are up because
        # the pool needs two OSDs
        #
        c.helper("pool_read_write 2")  # 2 == pool size
        c.destroy_osd(osd_uuid)
        c.destroy_osd(other_osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " +
             journal_disk + " " + other_data_disk)

    #
    # Create an OSD and reuse an existing journal partition
    #
    def test_activate_reuse_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        #
        # Create an OSD with a separated journal and destroy it.
        #
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        journal_partition = c.get_journal_partition(osd_uuid)
        journal_path = journal_partition['path']
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk)
        osd_uuid = str(uuid.uuid1())
        #
        # Create another OSD with the journal partition of the previous OSD
        #
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_path)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        c.check_osd_status(osd_uuid)
        journal_partition = c.get_journal_partition(osd_uuid)
        #
        # Verify the previous OSD partition has been reused
        #
        assert journal_partition['path'] == journal_path
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)

    def test_activate_multipath(self):
        c = CephDisk()
        if c.sh("lsb_release -si").strip() != 'CentOS':
            pytest.skip(
                "see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
        c.ensure_sd()
        #
        # Figure out the name of the multipath device
        #
        disk = c.unused_disks('sd.')[0]
        c.sh("mpathconf --enable || true")
        c.sh("multipath " + disk)
        holders = os.listdir(
            "/sys/block/" + os.path.basename(disk) + "/holders")
        assert 1 == len(holders)
        name = open("/sys/block/" + holders[0] + "/dm/name").read()
        multipath = "/dev/mapper/" + name
        #
        # Prepare the multipath device
        #
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + multipath)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + multipath)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + multipath))[0]
        assert len(device['partitions']) == 2
        data_partition = c.get_osd_partition(osd_uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        journal_partition = c.get_journal_partition(osd_uuid)
        assert journal_partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("udevadm settle")
        c.sh("multipath -F")
        c.unload_scsi_debug()


class CephDiskTest(CephDisk):

    def main(self, argv):
        parser = argparse.ArgumentParser(
            'ceph-disk-test',
        )
        parser.add_argument(
            '-v', '--verbose',
            action='store_true', default=None,
            help='be more verbose',
        )
        parser.add_argument(
            '--destroy-osd',
            help='stop, umount and destroy',
        )
        args = parser.parse_args(argv)

        if args.verbose:
            logging.basicConfig(level=logging.DEBUG)

        if args.destroy_osd:
            dump = json.loads(CephDisk.sh("ceph osd dump -f json"))
            osd_uuid = None
            for osd in dump['osds']:
                if str(osd['osd']) == args.destroy_osd:
                    osd_uuid = osd['uuid']
            if osd_uuid:
                self.destroy_osd(osd_uuid)
            else:
                raise Exception("cannot find OSD " + args.destroy_osd +
                                " in ceph osd dump -f json")
            return

if __name__ == '__main__':
    sys.exit(CephDiskTest().main(sys.argv[1:]))