X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=src%2Fceph%2Fqa%2Ftasks%2Fresolve_stuck_peering.py;fp=src%2Fceph%2Fqa%2Ftasks%2Fresolve_stuck_peering.py;h=0000000000000000000000000000000000000000;hb=7da45d65be36d36b880cc55c5036e96c24b53f00;hp=bdf86e9242e4928dc30aa6cc17438bee95a8b737;hpb=691462d09d0987b47e112d6ee8740375df3c51b2;p=stor4nfv.git

diff --git a/src/ceph/qa/tasks/resolve_stuck_peering.py b/src/ceph/qa/tasks/resolve_stuck_peering.py
deleted file mode 100644
index bdf86e9..0000000
--- a/src/ceph/qa/tasks/resolve_stuck_peering.py
+++ /dev/null
@@ -1,112 +0,0 @@
-"""
-Resolve stuck peering
-"""
-import logging
-import time
-
-from teuthology import misc as teuthology
-from util.rados import rados
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
-    """
-    Test resolving stuck peering
-
-    requires 3 osds on a single test node
-    """
-    if config is None:
-        config = {}
-    assert isinstance(config, dict), \
-        'Resolve stuck peering only accepts a dict for config'
-
-    manager = ctx.managers['ceph']
-
-    while len(manager.get_osd_status()['up']) < 3:
-        time.sleep(10)
-
-
-    manager.wait_for_clean()
-
-    dummyfile = '/etc/fstab'
-    dummyfile1 = '/etc/resolv.conf'
-
-    # create a pool with a single PG
-    pool = 'foo'
-    log.info('creating pool foo')
-    manager.raw_cluster_cmd('osd', 'pool', 'create', '%s' % pool, '1')
-
-    # set the pool's min_size to 1
-    # so that I/O can continue
-    # while 2 osds are down
-    manager.set_pool_property(pool, "min_size", 1)
-
-    osds = [0, 1, 2]
-
-    primary = manager.get_pg_primary('foo', 0)
-    log.info("primary osd is %d", primary)
-
-    others = list(osds)
-    others.remove(primary)
-
-    log.info('writing initial objects')
-    first_mon = teuthology.get_first_mon(ctx, config)
-    (mon,) = ctx.cluster.only(first_mon).remotes.keys()
-    # create a few objects
-    for i in range(100):
-        rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
-
-    manager.wait_for_clean()
-
-    # kill all osds except the primary
-    log.info('killing other osds except primary')
-    for i in others:
-        manager.kill_osd(i)
-    for i in others:
-        manager.mark_down_osd(i)
-
-
-    for i in range(100):
-        rados(ctx, mon, ['-p', 'foo', 'put', 'new_%d' % i, dummyfile1])
-
-    # kill the primary osd
-    manager.kill_osd(primary)
-    manager.mark_down_osd(primary)
-
-    # revive the other 2 osds
-    for i in others:
-        manager.revive_osd(i)
-
-    # make sure the pg goes down
-    # assuming the pg number for a single-pg pool starts at 0
-    pgnum = 0
-    pgstr = manager.get_pgid(pool, pgnum)
-    stats = manager.get_single_pg_stats(pgstr)
-    log.info(stats['state'])
-
-    timeout = 60
-    start = time.time()
-
-    while 'down' not in stats['state']:
-        assert time.time() - start < timeout, \
-            'failed to reach down state before timeout expired'
-        stats = manager.get_single_pg_stats(pgstr)
-
-    # mark the primary as lost
-    manager.raw_cluster_cmd('osd', 'lost', '%d' % primary,
-                            '--yes-i-really-mean-it')
-
-
-    # expect the pg status to be active+undersized+degraded
-    # the pg should recover and become active+clean within the timeout
-    stats = manager.get_single_pg_stats(pgstr)
-    log.info(stats['state'])
-
-    timeout = 10
-    start = time.time()
-
-    while manager.get_num_down():
-        assert time.time() - start < timeout, \
-            'failed to recover before timeout expired'
-
-    manager.revive_osd(primary)
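
For reference, the fixture the deleted task builds through CephManager can be reproduced directly with the ceph CLI: a pool with a single PG, so every object maps to one easily tracked PG, and min_size lowered to 1 so the pool stays writable while two of the three OSDs are down. The sketch below is illustrative only; the pool name and PG count mirror the test, and the subprocess-based helper is an assumption rather than part of the deleted code.

import subprocess

def create_test_pool(pool='foo'):
    # A single PG means every object lands in the same PG,
    # which keeps the peering state easy to observe.
    subprocess.check_call(['ceph', 'osd', 'pool', 'create', pool, '1'])
    # min_size 1 keeps the pool writable while two replicas are down.
    subprocess.check_call(['ceph', 'osd', 'pool', 'set', pool,
                           'min_size', '1'])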
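
The task's two polling loops (wait for 'down', then wait for recovery) follow the same pattern: query the PG's state string and assert against a timeout. Outside teuthology the equivalent is polling `ceph pg dump`. A minimal sketch, assuming admin access to a running cluster; the JSON layout of `ceph pg dump` varies across Ceph releases, so the 'pg_stats' key path used here is an assumption.

import json
import subprocess
import time

def wait_for_pg_state(pgid, wanted, timeout=60, interval=2):
    # Poll until the PG's state string contains `wanted`
    # (e.g. 'down' or 'active+clean'), or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = subprocess.check_output(
            ['ceph', 'pg', 'dump', '--format=json'])
        dump = json.loads(out)
        # Assumption: stats sit in a flat 'pg_stats' list; some
        # releases nest them under 'pg_map'.
        pg_stats = dump.get('pg_stats') or \
            dump.get('pg_map', {}).get('pg_stats', [])
        for pg in pg_stats:
            if pg['pgid'] == pgid and wanted in pg['state']:
                return pg['state']
        time.sleep(interval)
    raise RuntimeError('pg %s did not reach %r within %ds'
                       % (pgid, wanted, timeout))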
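
The resolution step itself is the `ceph osd lost` command that appears verbatim in the deleted task: the PG is stuck down because the killed primary is the only OSD that saw the second batch of writes, and declaring that OSD lost lets the surviving replicas peer and go active at the cost of those unreplicated writes. A sketch of the end-to-end sequence, reusing the hypothetical wait_for_pg_state helper above:

def resolve_stuck_pg(pgid, lost_osd):
    # The PG must first be reported down before 'osd lost' helps.
    wait_for_pg_state(pgid, 'down', timeout=60)
    # Declare the dead primary lost; this discards the writes only
    # it held, allowing the remaining replicas to peer.
    subprocess.check_call(['ceph', 'osd', 'lost', str(lost_osd),
                           '--yes-i-really-mean-it'])
    wait_for_pg_state(pgid, 'active', timeout=60)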