3 watch_notify_same_primary task
5 from cStringIO import StringIO
9 from teuthology.orchestra import run
10 from teuthology.contextutil import safe_while
12 log = logging.getLogger(__name__)
# NOTE(review): this is a fragmented excerpt — each line begins with its
# original file line number, and many interior lines are missing. Comments
# below describe only what the visible fragments establish; anything else
# is hedged. Code tokens are left untouched.
15 @contextlib.contextmanager
# Teuthology task entry point: run the watch_notify_same_primary stress
# test against a Ceph cluster (yield point not visible in this excerpt —
# presumably yields after setup; confirm against the full file).
16 def task(ctx, config):
18 Run watch_notify_same_primary
20 The config should be as follows:
22 watch_notify_same_primary:
23 clients: [client list]
25 The client list should contain 1 client
27 The test requires 3 osds.
33 - watch_notify_same_primary:
37 log.info('Beginning watch_notify_same_primary...')
# Config must be a dict; the assert message doubles as usage guidance.
38 assert isinstance(config, dict), \
39 "please list clients to run on"
# Default to client.0 when no clients are configured; exactly one client
# is supported (asserted below and stated in the docstring).
41 clients = config.get('clients', ['client.0'])
42 assert len(clients) == 1
# `role` is taken from the client list (assignment not visible here);
# `basestring`/`iterkeys` mark this as Python 2 code.
44 assert isinstance(role, basestring)
# PREFIX is presumably the 'client.' role prefix — defined outside this
# excerpt; verify against the full file.
46 assert role.startswith(PREFIX)
47 (remote,) = ctx.cluster.only(role).remotes.iterkeys()
48 manager = ctx.managers['ceph']
# Prevent OSDs from being marked out while the test deliberately downs
# one (osd.0 is marked down later).
49 manager.raw_cluster_cmd('osd', 'set', 'noout')
# Fresh uniquely-named pool for the test; removed in cleanup at the end.
51 pool = manager.create_pool_with_unique_name()
# Object-name helper: object n is "foo-<n>".
52 def obj(n): return "foo-{num}".format(num=n)
# Fragment of start_watch(n) (definition mostly elided): attaches a
# per-watch child logger.
61 logger=log.getChild('watch.{id}'.format(id=n)))
# Launch `num` watcher processes (num defined in elided lines).
76 watches = [start_watch(i) for i in range(num)]
78 # wait for them all to register
# Poll (via safe_while retry loop) until the expected watchers show up
# in the listwatchers output.
80 with safe_while() as proceed:
# Count 'watcher=' entries in the captured listwatchers stdout to see
# how many watchers have registered for object i.
89 lines = proc.stdout.getvalue()
90 num_watchers = lines.count('watcher=')
91 log.info('i see %d watchers for %s', num_watchers, obj(i))
# Fragment of notify(n, msg) (definition mostly elided): per-notify
# child logger.
103 logger=log.getChild('notify.{id}'.format(id=n)))
# First notify round, one notify per watch, before any OSD failure.
105 [notify(n, 'notify1') for n in range(len(watches))]
# Kill osd.0 so the primary changes, then notify again — the point of
# the test is that notifies still reach watchers on the same primary
# after the failover.
108 manager.mark_down_osd(0)
110 [notify(n, 'notify2') for n in range(len(watches))]
115 log.info('joining watch_notify_stress')
# A newline on stdin signals each watcher process to exit.
116 for watch in watches:
117 watch.stdin.write("\n")
# Inspect each watcher's captured output line-by-line (the checks on
# `lines` are in elided code).
121 for watch in watches:
122 lines = watch.stdout.getvalue().split("\n")
# Cleanup: bring osd.0 back and drop the test pool. NOTE(review): the
# 'noout' flag set above is not visibly unset in this excerpt — confirm
# whether the full file clears it.
133 manager.revive_osd(0)
134 manager.remove_pool(pool)