"""
Monitor recovery
"""
import logging

import ceph_manager
from teuthology import misc as teuthology

log = logging.getLogger(__name__)

def task(ctx, config):
    """
    Test monitor recovery.
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for configuration'
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.keys()
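
    # drive the cluster through a CephManager bound to the first mon's remote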
    manager = ceph_manager.CephManager(
        mon,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
        )
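
    # mon names come back as 'mon.<id>'; keep just the id part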
    mons = [f.split('.')[1] for f in teuthology.get_mon_names(ctx)]
    log.info("mon ids = %s" % mons)
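
    # wait for all monitors to come up and form the initial quorum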
    manager.wait_for_mon_quorum_size(len(mons))

    log.info('verifying all monitors are in the quorum')
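    # every mon in quorum reports itself as leader or peon and sees all peers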
    for m in mons:
        s = manager.get_mon_status(m)
        assert s['state'] in ('leader', 'peon')
        assert len(s['quorum']) == len(mons)

    log.info('restarting each monitor in turn')
    for m in mons:
        # stop one monitor; the others should hold quorum
        manager.kill_mon(m)
        manager.wait_for_mon_quorum_size(len(mons) - 1)

        # restart it and wait for it to rejoin
        manager.revive_mon(m)
        manager.wait_for_mon_quorum_size(len(mons))

    # in forward and reverse order,
    rmons = mons[:]
    rmons.reverse()
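    # note: this loop rebinds 'mons'; when it finishes, 'mons' is the reversed list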
    for mons in mons, rmons:
        log.info('stopping all monitors')
        for m in mons:
            manager.kill_mon(m)

        log.info('forming a minimal quorum for %s, then adding monitors' % mons)
        qnum = (len(mons) // 2) + 1
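        # a monitor quorum requires a strict majority (e.g. 2 of 3, 3 of 5)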
        num = 0
        for m in mons:
            manager.revive_mon(m)
            num += 1
            if num >= qnum:
                manager.wait_for_mon_quorum_size(num)

    # on both leader and non-leader ranks...
    for rank in [0, 1]:
        log.info('removing mon %s' % mons[rank])
        manager.kill_mon(mons[rank])
        manager.wait_for_mon_quorum_size(len(mons) - 1)

        log.info('causing some monitor log activity')
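        # cluster log entries make the surviving mons commit new paxos
        # transactions, so the killed mon has state to catch up on when revived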
        m = 30
        for n in range(1, m):
            manager.raw_cluster_cmd('log', '%d of %d' % (n, m))

        log.info('adding mon %s back in' % mons[rank])
        manager.revive_mon(mons[rank])
        manager.wait_for_mon_quorum_size(len(mons))