"""
ceph_objectstore_tool - Simple test of ceph-objectstore-tool utility
"""
from cStringIO import StringIO
import contextlib
import logging
import ceph_manager
from teuthology import misc as teuthology
import time
import os
import string
from teuthology.orchestra import run
import sys
import tempfile
import json
from util.rados import (rados, create_replicated_pool, create_ec_pool)
# from util.rados import (rados, create_ec_pool,
#                               create_replicated_pool,
#                               create_cache_pool)

log = logging.getLogger(__name__)

# Should get cluster name "ceph" from somewhere
# and normal path from osd_data and osd_journal in conf
FSPATH = "/var/lib/ceph/osd/ceph-{id}"
JPATH = "/var/lib/ceph/osd/ceph-{id}/journal"


def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR,
                         BASE_NAME, DATALINECOUNT):
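    """Write NUM_OBJECTS files of DATALINECOUNT lines each into DATADIR locally."""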
    objects = range(1, NUM_OBJECTS + 1)
    for i in objects:
        NAME = BASE_NAME + "{num}".format(num=i)
        LOCALNAME = os.path.join(DATADIR, NAME)

        dataline = range(DATALINECOUNT)
        fd = open(LOCALNAME, "w")
        data = "This is the data for " + NAME + "\n"
        for _ in dataline:
            fd.write(data)
        fd.close()


def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
                          BASE_NAME, DATALINECOUNT):
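    """Recreate the same NUM_OBJECTS data files in DATADIR on the given remote."""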
    objects = range(1, NUM_OBJECTS + 1)
    for i in objects:
        NAME = BASE_NAME + "{num}".format(num=i)
        DDNAME = os.path.join(DATADIR, NAME)

        remote.run(args=['rm', '-f', DDNAME])

        dataline = range(DATALINECOUNT)
        data = "This is the data for " + NAME + "\n"
        DATA = ""
        for _ in dataline:
            DATA += data
        teuthology.write_file(remote, DDNAME, DATA)


def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR,
              BASE_NAME, DATALINECOUNT, POOL, db, ec):
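    """
    Put the prepared data files into POOL with rados put and decorate the
    objects with xattrs plus, for replicated pools, an omap header and omap
    entries.  Everything written is recorded in db for later verification.
    Returns the number of errors seen.
    """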
    ERRORS = 0
    log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS))

    objects = range(1, NUM_OBJECTS + 1)
    for i in objects:
        NAME = BASE_NAME + "{num}".format(num=i)
        DDNAME = os.path.join(DATADIR, NAME)

        proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME],
                     wait=False)
        # proc = remote.run(args=['rados', '-p', POOL, 'put', NAME, DDNAME])
        ret = proc.wait()
        if ret != 0:
            log.critical("Rados put failed with status {ret}".
                         format(ret=proc.exitstatus))
            sys.exit(1)

        db[NAME] = {}

        keys = range(i)
        db[NAME]["xattr"] = {}
        for k in keys:
            if k == 0:
                continue
            mykey = "key{i}-{k}".format(i=i, k=k)
            myval = "val{i}-{k}".format(i=i, k=k)
            proc = remote.run(args=['rados', '-p', POOL, 'setxattr',
                                    NAME, mykey, myval])
            ret = proc.wait()
            if ret != 0:
                log.error("setxattr failed with {ret}".format(ret=ret))
                ERRORS += 1
            db[NAME]["xattr"][mykey] = myval

        # Erasure coded pools don't support omap
        if ec:
            continue

        # Create omap header in all objects but REPobject1
        if i != 1:
            myhdr = "hdr{i}".format(i=i)
            proc = remote.run(args=['rados', '-p', POOL, 'setomapheader',
                                    NAME, myhdr])
            ret = proc.wait()
            if ret != 0:
                log.critical("setomapheader failed with {ret}".format(ret=ret))
                ERRORS += 1
            db[NAME]["omapheader"] = myhdr

        db[NAME]["omap"] = {}
        for k in keys:
            if k == 0:
                continue
            mykey = "okey{i}-{k}".format(i=i, k=k)
            myval = "oval{i}-{k}".format(i=i, k=k)
            proc = remote.run(args=['rados', '-p', POOL, 'setomapval',
                                    NAME, mykey, myval])
            ret = proc.wait()
            if ret != 0:
                log.critical("setomapval failed with {ret}".format(ret=ret))
                ERRORS += 1
            db[NAME]["omap"][mykey] = myval

    return ERRORS


def get_lines(filename):
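    """Return filename's lines up to the first blank line (or EOF), then remove the file."""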
    tmpfd = open(filename, "r")
    line = True
    lines = []
    while line:
        line = tmpfd.readline().rstrip('\n')
        if line:
            lines += [line]
    tmpfd.close()
    os.unlink(filename)
    return lines


@contextlib.contextmanager
def task(ctx, config):
    """
    Run ceph_objectstore_tool test

    The config should be as follows::

        ceph_objectstore_tool:
          objects: 20 # <number of objects>
          pgnum: 12
    """

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'ceph_objectstore_tool task only accepts a dict for configuration'

    log.info('Beginning ceph_objectstore_tool...')

    log.debug(config)
    log.debug(ctx)
    clients = ctx.cluster.only(teuthology.is_type('client'))
    assert len(clients.remotes) > 0, 'Must specify at least 1 client'
    (cli_remote, _) = clients.remotes.popitem()
    log.debug(cli_remote)

    # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys()))
    # client = clients.popitem()
    # log.info(client)
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    log.info("OSDS")
    log.info(osds)
    log.info(osds.remotes)

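    # Wait until every OSD is both up and in, then set noout/nodown so the
    # monitors leave stopped OSDs alone for the rest of the test.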
    manager = ctx.managers['ceph']
    while (len(manager.get_osd_status()['up']) !=
           len(manager.get_osd_status()['raw'])):
        time.sleep(10)
    while (len(manager.get_osd_status()['in']) !=
           len(manager.get_osd_status()['up'])):
        time.sleep(10)
    manager.raw_cluster_cmd('osd', 'set', 'noout')
    manager.raw_cluster_cmd('osd', 'set', 'nodown')

    PGNUM = config.get('pgnum', 12)
    log.info("pgnum: {num}".format(num=PGNUM))

    ERRORS = 0

    REP_POOL = "rep_pool"
    REP_NAME = "REPobject"
    create_replicated_pool(cli_remote, REP_POOL, PGNUM)
    ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME)

    EC_POOL = "ec_pool"
    EC_NAME = "ECobject"
    create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
    ERRORS += test_objectstore(ctx, config, cli_remote,
                               EC_POOL, EC_NAME, ec=True)

    if ERRORS == 0:
        log.info("TEST PASSED")
    else:
        log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))

    assert ERRORS == 0

    try:
        yield
    finally:
        log.info('Ending ceph_objectstore_tool')


def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
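    """
    Exercise ceph-objectstore-tool against the objects written to REP_POOL:
    --op list, get/set-bytes (replicated pools only), attr operations, and
    the --op info/log/export/remove/import cycle.  Returns the error count.
    """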
    manager = ctx.managers['ceph']

    osds = ctx.cluster.only(teuthology.is_type('osd'))

    TEUTHDIR = teuthology.get_testdir(ctx)
    DATADIR = os.path.join(TEUTHDIR, "ceph.data")
    DATALINECOUNT = 10000
    ERRORS = 0
    NUM_OBJECTS = config.get('objects', 10)
    log.info("objects: {num}".format(num=NUM_OBJECTS))

    pool_dump = manager.get_pool_dump(REP_POOL)
    REPID = pool_dump['pool']

    log.debug("repid={num}".format(num=REPID))

    db = {}

    LOCALDIR = tempfile.mkdtemp("cod")

    cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR,
                         REP_NAME, DATALINECOUNT)
    allremote = []
    allremote.append(cli_remote)
    allremote += osds.remotes.keys()
    allremote = list(set(allremote))
    for remote in allremote:
        cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
                              REP_NAME, DATALINECOUNT)

    ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR,
                        REP_NAME, DATALINECOUNT, REP_POOL, db, ec)

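    # Map each OSD id to the PGs of this pool that it hosts.  For erasure
    # coded pools the tool addresses individual shards, so the shard id is
    # appended to the pgid (e.g. "2.3s1").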
    pgs = {}
    for stats in manager.get_pg_stats():
        if stats["pgid"].find(str(REPID) + ".") != 0:
            continue
        if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL:
            for osd in stats["acting"]:
                pgs.setdefault(osd, []).append(stats["pgid"])
        elif pool_dump["type"] == ceph_manager.CephManager.ERASURE_CODED_POOL:
            shard = 0
            for osd in stats["acting"]:
                pgs.setdefault(osd, []).append("{pgid}s{shard}".
                                               format(pgid=stats["pgid"],
                                                      shard=shard))
                shard += 1
        else:
            raise Exception("{pool} has an unexpected type {type}".
                            format(pool=REP_POOL, type=pool_dump["type"]))

    log.info(pgs)
    log.info(db)

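    # ceph-objectstore-tool operates on offline object stores, so stop every
    # OSD before running it.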
    for osd in manager.get_osd_status()['up']:
        manager.kill_osd(osd)
    time.sleep(5)

    pgswithobjects = set()
    objsinpg = {}

    # Test --op list and generate json for all objects
    log.info("Test --op list by generating json for all objects")
    prefix = ("sudo ceph-objectstore-tool "
              "--data-path {fpath} "
              "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH)
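    # FSPATH/JPATH still contain a literal {id}; each command built from this
    # prefix fills it in below with .format(id=osdid).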
    for remote in osds.remotes.iterkeys():
        log.debug(remote)
        log.debug(osds.remotes[remote])
        for role in osds.remotes[remote]:
            if string.find(role, "osd.") != 0:
                continue
            osdid = int(role.split('.')[1])
            log.info("process osd.{id} on {remote}".
                     format(id=osdid, remote=remote))
            cmd = (prefix + "--op list").format(id=osdid)
            proc = remote.run(args=cmd.split(), check_status=False,
                              stdout=StringIO())
            if proc.exitstatus != 0:
                log.error("Bad exit status {ret} from --op list request".
                          format(ret=proc.exitstatus))
                ERRORS += 1
            else:
                for pgline in proc.stdout.getvalue().splitlines():
                    if not pgline:
                        continue
                    (pg, obj) = json.loads(pgline)
                    name = obj['oid']
                    if name in db:
                        pgswithobjects.add(pg)
                        objsinpg.setdefault(pg, []).append(name)
                        db[name].setdefault("pg2json",
                                            {})[pg] = json.dumps(obj)

    log.info(db)
    log.info(pgswithobjects)
    log.info(objsinpg)

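    # get-bytes/set-bytes is only exercised on the replicated pool, where
    # every acting OSD holds a complete copy of the object to compare against.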
    if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL:
        # Test get-bytes
        log.info("Test get-bytes and set-bytes")
        for basename in db.keys():
            file = os.path.join(DATADIR, basename)
            GETNAME = os.path.join(DATADIR, "get")
            SETNAME = os.path.join(DATADIR, "set")

            for remote in osds.remotes.iterkeys():
                for role in osds.remotes[remote]:
                    if string.find(role, "osd.") != 0:
                        continue
                    osdid = int(role.split('.')[1])
                    if osdid not in pgs:
                        continue

                    for pg, JSON in db[basename]["pg2json"].iteritems():
                        if pg in pgs[osdid]:
                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += ("get-bytes {fname}".
                                    format(fname=GETNAME).split())
                            proc = remote.run(args=cmd, check_status=False)
                            if proc.exitstatus != 0:
                                remote.run(args="rm -f {getfile}".
                                           format(getfile=GETNAME).split())
                                log.error("Bad exit status {ret}".
                                          format(ret=proc.exitstatus))
                                ERRORS += 1
                                continue
                            cmd = ("diff -q {file} {getfile}".
                                   format(file=file, getfile=GETNAME))
                            proc = remote.run(args=cmd.split())
                            if proc.exitstatus != 0:
                                log.error("Data from get-bytes differ")
                                # log.debug("Got:")
                                # cat_file(logging.DEBUG, GETNAME)
                                # log.debug("Expected:")
                                # cat_file(logging.DEBUG, file)
                                ERRORS += 1
                            remote.run(args="rm -f {getfile}".
                                       format(getfile=GETNAME).split())

                            data = ("put-bytes going into {file}\n".
                                    format(file=file))
                            teuthology.write_file(remote, SETNAME, data)
                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += ("set-bytes {fname}".
                                    format(fname=SETNAME).split())
                            proc = remote.run(args=cmd, check_status=False)
                            proc.wait()
                            if proc.exitstatus != 0:
                                log.info("set-bytes failed for object {obj} "
                                         "in pg {pg} osd.{id} ret={ret}".
                                         format(obj=basename, pg=pg,
                                                id=osdid, ret=proc.exitstatus))
                                ERRORS += 1

                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += "get-bytes -".split()
                            proc = remote.run(args=cmd, check_status=False,
                                              stdout=StringIO())
                            proc.wait()
                            if proc.exitstatus != 0:
                                log.error("get-bytes after "
                                          "set-bytes ret={ret}".
                                          format(ret=proc.exitstatus))
                                ERRORS += 1
                            else:
                                if data != proc.stdout.getvalue():
                                    log.error("Data inconsistent after "
                                              "set-bytes, got:")
                                    log.error(proc.stdout.getvalue())
                                    ERRORS += 1

                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += ("set-bytes {fname}".
                                    format(fname=file).split())
                            proc = remote.run(args=cmd, check_status=False)
                            proc.wait()
                            if proc.exitstatus != 0:
                                log.info("set-bytes failed for object {obj} "
                                         "in pg {pg} osd.{id} ret={ret}".
                                         format(obj=basename, pg=pg,
                                                id=osdid, ret=proc.exitstatus))
                                ERRORS += 1

    log.info("Test list-attrs get-attr")
    for basename in db.keys():
        file = os.path.join(DATADIR, basename)
        GETNAME = os.path.join(DATADIR, "get")
        SETNAME = os.path.join(DATADIR, "set")

        for remote in osds.remotes.iterkeys():
            for role in osds.remotes[remote]:
                if string.find(role, "osd.") != 0:
                    continue
                osdid = int(role.split('.')[1])
                if osdid not in pgs:
                    continue

                for pg, JSON in db[basename]["pg2json"].iteritems():
                    if pg in pgs[osdid]:
                        cmd = ((prefix + "--pgid {pg}").
                               format(id=osdid, pg=pg).split())
                        cmd.append(run.Raw("'{json}'".format(json=JSON)))
                        cmd += ["list-attrs"]
                        proc = remote.run(args=cmd, check_status=False,
                                          stdout=StringIO(), stderr=StringIO())
                        proc.wait()
                        if proc.exitstatus != 0:
                            log.error("Bad exit status {ret}".
                                      format(ret=proc.exitstatus))
                            ERRORS += 1
                            continue
                        keys = proc.stdout.getvalue().split()
                        values = dict(db[basename]["xattr"])

                        for key in keys:
                            if (key == "_" or
                                    key == "snapset" or
                                    key == "hinfo_key"):
                                continue
                            key = key.strip("_")
                            if key not in values:
                                log.error("The key {key} should be present".
                                          format(key=key))
                                ERRORS += 1
                                continue
                            exp = values.pop(key)
                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += ("get-attr {key}".
                                    format(key="_" + key).split())
                            proc = remote.run(args=cmd, check_status=False,
                                              stdout=StringIO())
                            proc.wait()
                            if proc.exitstatus != 0:
                                log.error("get-attr failed with {ret}".
                                          format(ret=proc.exitstatus))
                                ERRORS += 1
                                continue
                            val = proc.stdout.getvalue()
                            if exp != val:
                                log.error("For key {key} got value {got} "
                                          "instead of {expected}".
                                          format(key=key, got=val,
                                                 expected=exp))
                                ERRORS += 1
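                        # hinfo_key (kept on erasure coded shards) is round
                        # tripped through set-attr/get-attr: overwrite it with
                        # a placeholder, verify, then restore and re-verify
                        # the original base64-encoded value.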
                        if "hinfo_key" in keys:
                            cmd_prefix = prefix.format(id=osdid)
                            cmd = """
      expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64)
      echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} -
      test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder
      echo $expected | base64 --decode | \
         {prefix} --pgid {pg} '{json}' set-attr {key} -
      test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected
                            """.format(prefix=cmd_prefix, pg=pg, json=JSON,
                                       key="hinfo_key")
                            log.debug(cmd)
                            proc = remote.run(args=['bash', '-e', '-x',
                                                    '-c', cmd],
                                              check_status=False,
                                              stdout=StringIO(),
                                              stderr=StringIO())
                            proc.wait()
                            if proc.exitstatus != 0:
                                log.error("failed with " +
                                          str(proc.exitstatus))
                                log.error(proc.stdout.getvalue() + " " +
                                          proc.stderr.getvalue())
                                ERRORS += 1

                        if len(values) != 0:
                            log.error("Not all keys found, remaining keys:")
                            log.error(values)
                            ERRORS += 1

    log.info("Test pg info")
    for remote in osds.remotes.iterkeys():
        for role in osds.remotes[remote]:
            if string.find(role, "osd.") != 0:
                continue
            osdid = int(role.split('.')[1])
            if osdid not in pgs:
                continue

            for pg in pgs[osdid]:
                cmd = ((prefix + "--op info --pgid {pg}").
                       format(id=osdid, pg=pg).split())
                proc = remote.run(args=cmd, check_status=False,
                                  stdout=StringIO())
                proc.wait()
                if proc.exitstatus != 0:
                    log.error("Failure of --op info command with {ret}".
                              format(ret=proc.exitstatus))
                    ERRORS += 1
                    continue
                info = proc.stdout.getvalue()
                if not str(pg) in info:
                    log.error("Bad data from info: {info}".format(info=info))
                    ERRORS += 1

    log.info("Test pg logging")
    for remote in osds.remotes.iterkeys():
        for role in osds.remotes[remote]:
            if string.find(role, "osd.") != 0:
                continue
            osdid = int(role.split('.')[1])
            if osdid not in pgs:
                continue

            for pg in pgs[osdid]:
                cmd = ((prefix + "--op log --pgid {pg}").
                       format(id=osdid, pg=pg).split())
                proc = remote.run(args=cmd, check_status=False,
                                  stdout=StringIO())
                proc.wait()
                if proc.exitstatus != 0:
                    log.error("Getting log failed for pg {pg} "
                              "from osd.{id} with {ret}".
                              format(pg=pg, id=osdid, ret=proc.exitstatus))
                    ERRORS += 1
                    continue
                HASOBJ = pg in pgswithobjects
                MODOBJ = "modify" in proc.stdout.getvalue()
                if HASOBJ != MODOBJ:
                    log.error("Bad log for pg {pg} from osd.{id}".
                              format(pg=pg, id=osdid))
                    MSG = (HASOBJ and [""] or ["NOT "])[0]
                    log.error("Log should {msg}have a modify entry".
                              format(msg=MSG))
                    ERRORS += 1

    log.info("Test pg export")
    EXP_ERRORS = 0
    for remote in osds.remotes.iterkeys():
        for role in osds.remotes[remote]:
            if string.find(role, "osd.") != 0:
                continue
            osdid = int(role.split('.')[1])
            if osdid not in pgs:
                continue

            for pg in pgs[osdid]:
                fpath = os.path.join(DATADIR, "osd{id}.{pg}".
                                     format(id=osdid, pg=pg))

                cmd = ((prefix + "--op export --pgid {pg} --file {file}").
                       format(id=osdid, pg=pg, file=fpath))
                proc = remote.run(args=cmd, check_status=False,
                                  stdout=StringIO())
                proc.wait()
                if proc.exitstatus != 0:
                    log.error("Exporting failed for pg {pg} "
                              "on osd.{id} with {ret}".
                              format(pg=pg, id=osdid, ret=proc.exitstatus))
                    EXP_ERRORS += 1

    ERRORS += EXP_ERRORS

    log.info("Test pg removal")
    RM_ERRORS = 0
    for remote in osds.remotes.iterkeys():
        for role in osds.remotes[remote]:
            if string.find(role, "osd.") != 0:
                continue
            osdid = int(role.split('.')[1])
            if osdid not in pgs:
                continue

            for pg in pgs[osdid]:
                cmd = ((prefix + "--force --op remove --pgid {pg}").
                       format(pg=pg, id=osdid))
                proc = remote.run(args=cmd, check_status=False,
                                  stdout=StringIO())
                proc.wait()
                if proc.exitstatus != 0:
                    log.error("Removing failed for pg {pg} "
                              "on osd.{id} with {ret}".
                              format(pg=pg, id=osdid, ret=proc.exitstatus))
                    RM_ERRORS += 1

    ERRORS += RM_ERRORS

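    # Only attempt imports if every PG was exported and removed cleanly;
    # otherwise the import results would not be meaningful.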
    IMP_ERRORS = 0
    if EXP_ERRORS == 0 and RM_ERRORS == 0:
        log.info("Test pg import")

        for remote in osds.remotes.iterkeys():
            for role in osds.remotes[remote]:
                if string.find(role, "osd.") != 0:
                    continue
                osdid = int(role.split('.')[1])
                if osdid not in pgs:
                    continue

                for pg in pgs[osdid]:
                    fpath = os.path.join(DATADIR, "osd{id}.{pg}".
                                         format(id=osdid, pg=pg))

                    cmd = ((prefix + "--op import --file {file}").
                           format(id=osdid, file=fpath))
                    proc = remote.run(args=cmd, check_status=False,
                                      stdout=StringIO())
                    proc.wait()
                    if proc.exitstatus != 0:
                        log.error("Import failed from {file} with {ret}".
                                  format(file=fpath, ret=proc.exitstatus))
                        IMP_ERRORS += 1
    else:
        log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")

    ERRORS += IMP_ERRORS

    if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
        log.info("Restarting OSDs....")
        # They still appear to be up because nodown is set
        for osd in manager.get_osd_status()['up']:
            manager.revive_osd(osd)
        # Wait for health?
        time.sleep(5)
        # Let a scrub after the test run verify consistency of all copies
        log.info("Verify replicated import data")
        objects = range(1, NUM_OBJECTS + 1)
        for i in objects:
            NAME = REP_NAME + "{num}".format(num=i)
            TESTNAME = os.path.join(DATADIR, "gettest")
            REFNAME = os.path.join(DATADIR, NAME)

            proc = rados(ctx, cli_remote,
                         ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False)

            ret = proc.wait()
            if ret != 0:
                log.error("After import, rados get failed with {ret}".
                          format(ret=proc.exitstatus))
                ERRORS += 1
                continue

            cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME,
                                                   ref=REFNAME)
            proc = cli_remote.run(args=cmd, check_status=False)
            proc.wait()
            if proc.exitstatus != 0:
                log.error("Data comparison failed for {obj}".format(obj=NAME))
                ERRORS += 1

    return ERRORS