3 from __future__ import print_function
4 from subprocess import call
6 from subprocess import check_output
# Py2.6 compatibility shim: subprocess.check_output() backported from the
# Python 2.7 stdlib.  Runs the command via Popen with stdout captured and, in
# the full source, raises CalledProcessError (built on line 19) on non-zero
# exit.
# NOTE(review): the embedded source line numbers are non-contiguous (9, 15,
# 17-18, 20+ missing), so part of this function's body -- including the
# retcode test and the raise/return -- is absent from this capture.
8 def check_output(*popenargs, **kwargs):
10 # backported from python 2.7 stdlib
11 process = subprocess.Popen(
12 stdout=subprocess.PIPE, *popenargs, **kwargs)
13 output, unused_err = process.communicate()
14 retcode = process.poll()
16 cmd = kwargs.get("args")
19 error = subprocess.CalledProcessError(retcode, cmd)
# DEVNULL: use subprocess.DEVNULL where available (Python 3.3+); otherwise
# fall back to an open write handle on os.devnull.
# NOTE(review): the surrounding try/except lines (38, 40) are missing from
# this capture -- line 39 is presumably the except branch.
37 from subprocess import DEVNULL
39 DEVNULL = open(os.devnull, "wb")
# Default log format/level for the whole test run; many checks below log
# errors/warnings rather than raising.
41 logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
# Python 3 text handling: define decode() (its `def` line, 45, is missing
# from this capture) to turn subprocess bytes output into str, and re-define
# check_output() as a wrapper that always returns decoded text.
# NOTE(review): the matching `else:` branch for Python 2 (lines 50+) is not
# visible here.
44 if sys.version_info[0] >= 3:
46 return s.decode('utf-8')
48 def check_output(*args, **kwargs):
49 return decode(subprocess.check_output(*args, **kwargs))
# Poll `ceph health` until it reports HEALTH_OK or HEALTH_WARN; raise if the
# cluster does not get healthy in time.
# NOTE(review): lines 58, 60-61 are missing -- presumably the timeout counter
# and sleep inside the loop body; the raise on line 62 fires when that
# counter is exhausted.
56 def wait_for_health():
57 print("Wait for health_ok...", end="")
59 while call("{path}/ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null".format(path=CEPH_BIN), shell=True) == 0:
62 raise Exception("Time exceeded to go to health")
def get_pool_id(name, nullfd):
    """Return the id of the pool named *name*, as a string.

    Runs ``ceph osd pool stats <name>`` and returns the fourth
    whitespace-separated token of its output, which has the form
    ``pool <name> id <id> ...``.

    :param name: pool name to look up
    :param nullfd: open file object used to discard the command's stderr
    :returns: the pool id token (a string, not an int)
    """
    cmd = "{path}/ceph osd pool stats {pool}".format(pool=name, path=CEPH_BIN).split()
    # Output: "pool <pool> id <id> ..." -- index 3 is the id field.
    return check_output(cmd, stderr=nullfd).split()[3]
73 # return a list of unique PGS given an osd subdirectory
# Scans <SUBDIR>/current for "*_head" collection directories matching the
# pool id and strips the "_head" suffix to yield PG ids.
# NOTE(review): lines 75-76 and 81+ are missing -- presumably the PGS
# initialization and the return statement.
74 def get_osd_pgs(SUBDIR, ID):
# Matches collections belonging to pool ID, e.g. "<ID>.xx_head".
77 endhead = re.compile("{id}.*_head$".format(id=ID))
78 DIR = os.path.join(SUBDIR, "current")
# ID is None means "all pools"; otherwise filter by the compiled pattern.
79 PGS += [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and (ID is None or endhead.match(f))]
80 PGS = [re.sub("_head", "", p) for p in PGS if "_head" in p]
84 # return a sorted list of unique PGs given a directory
# NOTE(review): the `def` line (85) is missing from this capture --
# presumably `def get_pgs(DIR, ID):` given the call sites below; lines 87-88
# (PGS init and the `for d in OSDS:` header) are also absent.
86 OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
89 SUBDIR = os.path.join(DIR, d)
# Aggregate per-OSD PG lists, then dedupe and sort.
90 PGS += get_osd_pgs(SUBDIR, ID)
91 return sorted(set(PGS))
94 # return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified
# Walks every osd*/current/<pg>_head directory and keeps PGs that hold at
# least one file whose name starts with `prefix`.
# NOTE(review): lines 97-98, 101-102, 104 and 108 are missing -- presumably
# PGS init, the osd/PG loop headers, a `continue`, and the PGS.append().
95 def get_objs(ALLPGS, prefix, DIR, ID):
96 OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
99 DIRL2 = os.path.join(DIR, d)
100 SUBDIR = os.path.join(DIRL2, "current")
103 if not os.path.isdir(os.path.join(SUBDIR, PGDIR)):
105 FINALDIR = os.path.join(SUBDIR, PGDIR)
106 # See if there are any objects there
# Flatten every filename under FINALDIR (recursively) and test the prefix.
107 if any(f for f in [val for _, _, fl in os.walk(FINALDIR) for val in fl] if f.startswith(prefix)):
109 return sorted(set(PGS))
112 # return a sorted list of OSDS which have data from a given PG
# NOTE(review): lines 115-116, 119 and 121+ are missing -- presumably OSDS
# init, the `for d in ALLOSDS:` header, the OSDS.append() and the return.
113 def get_osds(PG, DIR):
114 ALLOSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
117 DIRL2 = os.path.join(DIR, d)
118 SUBDIR = os.path.join(DIRL2, "current")
# An OSD "has" the PG if its current/ contains a <PG>_head collection dir.
120 if not os.path.isdir(os.path.join(SUBDIR, PGDIR)):
# Read `filename` and return its lines (newline-stripped).
# NOTE(review): lines 128-130 and 132+ are missing -- presumably the result
# list, the read loop, the close() and the return.  The file handle is
# opened without a context manager; cannot confirm it is closed from this
# fragment alone.
126 def get_lines(filename):
127 tmpfd = open(filename, "r")
131 line = tmpfd.readline().rstrip('\n')
# Dump `filename` to stdout for debugging, but only when `level` is at or
# above the logger's effective level (the early-exit branch body on line 141
# is missing from this capture, as is the print loop around line 145).
139 def cat_file(level, filename):
140 if level < logging.getLogger().getEffectiveLevel():
142 print("File: " + filename)
143 with open(filename, "r") as f:
145 line = f.readline().rstrip('\n')
152 def vstart(new, opt=""):
153 print("vstarting....", end="")
154 NEW = new and "-n" or "-N"
155 call("MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 {path}/src/vstart.sh --filestore --short -l {new} -d {opt} > /dev/null 2>&1".format(new=NEW, opt=opt, path=CEPH_ROOT), shell=True)
# Run `cmd` expecting it to FAIL with `errmsg` on stderr; returns an error
# count (0 on the expected failure).  With tty=True the command's stdio is
# wired to /dev/tty to exercise tty-detection paths.
# NOTE(review): many lines are missing (160-161, 164, 166, 169-171, 173-174,
# 176-178, 180, 183, 185-186, 190+) -- the try/return-skip/else structure and
# the final return are not visible here.
# NOTE(review): "rwb" is not a valid mode for open() in Python 3 (it raises
# ValueError); the except/SKIP branch below suggests failures opening the
# tty are treated as a skip -- confirm against the full source.
159 def test_failure(cmd, errmsg, tty=False):
162 ttyfd = open("/dev/tty", "rwb")
163 except Exception as e:
165 logging.info("SKIP " + cmd)
167 TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
168 tmpfd = open(TMPFILE, "wb")
172 ret = call(cmd, shell=True, stdin=ttyfd, stdout=ttyfd, stderr=tmpfd)
175 ret = call(cmd, shell=True, stderr=tmpfd)
# Non-zero exit expected; exit 0 is itself a test failure.
179 logging.error("Should have failed, but got exit 0")
181 lines = get_lines(TMPFILE)
182 matched = [ l for l in lines if errmsg in l ]
184 logging.info("Correctly failed with message \"" + matched[0] + "\"")
187 logging.error("Command: " + cmd )
188 logging.error("Bad messages to stderr \"" + str(lines) + "\"")
189 logging.error("Expected \"" + errmsg + "\"")
# Body of the namespace-name helper (its `def` line, ~195, is missing from
# this capture -- presumably `def get_nspace(num):` given the call sites
# below).  Maps an index to a namespace string like "ns0", "ns1", ...
196 return "ns{num}".format(num=num)
# Verify imported objects against the reference files in DATADIR and the
# expected-attribute database `db` (keyed db[nspace][name] with "xattr",
# "omapheader" and "omap" sub-dicts).  Compares object data via `rados get`
# + diff, then checks each xattr, the omap header, and each omap key/value.
# Returns an error count (accumulation and return lines are missing from
# this capture, along with many control-flow lines -- the embedded source
# line numbers are non-contiguous throughout).
199 def verify(DATADIR, POOL, NAME_PREFIX, db):
200 TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
# DATADIR filenames look like "<nspace>-<name>__<clone>".
202 for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(NAME_PREFIX) == 0]:
203 nsfile = rawnsfile.split("__")[0]
204 clone = rawnsfile.split("__")[1]
205 nspace = nsfile.split("-")[0]
206 file = nsfile.split("-")[1]
210 path = os.path.join(DATADIR, rawnsfile)
# Data check: fetch the object and diff against the reference file.
215 cmd = "{path}/rados -p {pool} -N '{nspace}' get {file} {out}".format(pool=POOL, file=file, out=TMPFILE, nspace=nspace, path=CEPH_BIN)
217 call(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL)
218 cmd = "diff -q {src} {result}".format(src=path, result=TMPFILE)
220 ret = call(cmd, shell=True)
222 logging.error("{file} data not imported properly".format(file=file))
# xattr check: every recorded key must read back with the recorded value.
228 for key, val in db[nspace][file]["xattr"].items():
229 cmd = "{path}/rados -p {pool} -N '{nspace}' getxattr {name} {key}".format(pool=POOL, name=file, key=key, nspace=nspace, path=CEPH_BIN)
231 getval = check_output(cmd, shell=True, stderr=DEVNULL)
232 logging.debug("getxattr {key} {val}".format(key=key, val=getval))
234 logging.error("getxattr of key {key} returned wrong val: {get} instead of {orig}".format(key=key, get=getval, orig=val))
# omap header check; "" means no header was recorded for this object.
237 hdr = db[nspace][file].get("omapheader", "")
238 cmd = "{path}/rados -p {pool} -N '{nspace}' getomapheader {name} {file}".format(pool=POOL, name=file, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
240 ret = call(cmd, shell=True, stderr=DEVNULL)
242 logging.error("rados getomapheader returned {ret}".format(ret=ret))
245 getlines = get_lines(TMPFILE)
# getomapheader output is at most one line here.
246 assert(len(getlines) == 0 or len(getlines) == 1)
247 if len(getlines) == 0:
251 logging.debug("header: {hdr}".format(hdr=gethdr))
253 logging.error("getomapheader returned wrong val: {get} instead of {orig}".format(get=gethdr, orig=hdr))
# omap key/value check.
255 for key, val in db[nspace][file]["omap"].items():
256 cmd = "{path}/rados -p {pool} -N '{nspace}' getomapval {name} {key} {file}".format(pool=POOL, name=file, key=key, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
258 ret = call(cmd, shell=True, stderr=DEVNULL)
260 logging.error("getomapval returned {ret}".format(ret=ret))
263 getlines = get_lines(TMPFILE)
264 if len(getlines) != 1:
265 logging.error("Bad data from getomapval {lines}".format(lines=getlines))
269 logging.debug("getomapval {key} {val}".format(key=key, val=getval))
271 logging.error("getomapval returned wrong val: {get} instead of {orig}".format(get=getval, orig=val))
# Validate the top-level structure of a `--op dump-journal` JSON dict:
# requires a 'header' with 'max_size' and an 'entries' list, then recurses
# into check_journal_entries().  Returns an error count.
# NOTE(review): the `errors` initialization, the increment lines and the
# final return (281, 284, 287-288, 292, 295, 297+) are missing from this
# capture.
280 def check_journal(jsondict):
282 if 'header' not in jsondict:
283 logging.error("Key 'header' not in dump-journal")
285 elif 'max_size' not in jsondict['header']:
286 logging.error("Key 'max_size' not in dump-journal header")
289 print("\tJournal max_size = {size}".format(size=jsondict['header']['max_size']))
290 if 'entries' not in jsondict:
291 logging.error("Key 'entries' not in dump-journal output")
293 elif len(jsondict['entries']) == 0:
294 logging.info("No entries in journal found")
296 errors += check_journal_entries(jsondict['entries'])
# Validate each journal entry dict: must have 'offset', 'seq' and a
# non-empty 'transactions' list; delegates per-transaction checks to
# check_entry_transactions().  Returns an error count (init/increment/return
# lines 301, 305, 308, 311, 314-315, 317+ are missing from this capture).
300 def check_journal_entries(entries):
302 for enum in range(len(entries)):
303 if 'offset' not in entries[enum]:
304 logging.error("No 'offset' key in entry {e}".format(e=enum))
306 if 'seq' not in entries[enum]:
307 logging.error("No 'seq' key in entry {e}".format(e=enum))
309 if 'transactions' not in entries[enum]:
310 logging.error("No 'transactions' key in entry {e}".format(e=enum))
312 elif len(entries[enum]['transactions']) == 0:
313 logging.error("No transactions found in entry {e}".format(e=enum))
316 errors += check_entry_transactions(entries[enum], enum)
# Validate each transaction within one journal entry: 'trans_num' must be
# present and equal its index, and 'ops' must be present; delegates op
# checks to check_transaction_ops().  Returns an error count (init,
# increments and return -- lines 321, 325, 329, 332-333, 335+ -- are missing
# from this capture).
320 def check_entry_transactions(entry, enum):
322 for tnum in range(len(entry['transactions'])):
323 if 'trans_num' not in entry['transactions'][tnum]:
324 logging.error("Key 'trans_num' missing from entry {e} trans {t}".format(e=enum, t=tnum))
326 elif entry['transactions'][tnum]['trans_num'] != tnum:
327 ft = entry['transactions'][tnum]['trans_num']
328 logging.error("Bad trans_num ({ft}) entry {e} trans {t}".format(ft=ft, e=enum, t=tnum))
330 if 'ops' not in entry['transactions'][tnum]:
331 logging.error("Key 'ops' missing from entry {e} trans {t}".format(e=enum, t=tnum))
334 errors += check_transaction_ops(entry['transactions'][tnum]['ops'], enum, tnum)
# Validate the op list of one transaction: warn when empty, require
# 'op_num' (matching its index) and 'op_name' on every op.  Returns an
# error count (init/increment/return lines 339, 341, 345, 349, 352+ are
# missing from this capture).
338 def check_transaction_ops(ops, enum, tnum):
340 logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum))
342 for onum in range(len(ops)):
343 if 'op_num' not in ops[onum]:
344 logging.error("Key 'op_num' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
346 elif ops[onum]['op_num'] != onum:
347 fo = ops[onum]['op_num']
348 logging.error("Bad op_num ({fo}) from entry {e} trans {t} op {o}".format(fo=fo, e=enum, t=tnum, o=onum))
350 if 'op_name' not in ops[onum]:
351 logging.error("Key 'op_name' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
# For each OSD, run `--op dump-journal --format json`, parse the output as
# JSON, and structurally validate it via check_journal().  Accumulates into
# ERRORS (init, per-osd loop header, fd closes and return -- lines 357-358,
# 360-361, 364, 367, 369-371, 374-376, 381+ -- are missing from this
# capture).
# NOTE(review): `journal_errors is not 0` uses identity instead of equality;
# it should be `!= 0` (relies on CPython small-int interning and raises a
# SyntaxWarning on modern Pythons).
356 def test_dump_journal(CFSD_PREFIX, osds):
359 TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)
362 # Test --op dump-journal by loading json
363 cmd = (CFSD_PREFIX + "--op dump-journal --format json").format(osd=osd)
365 tmpfd = open(TMPFILE, "wb")
366 ret = call(cmd, shell=True, stdout=tmpfd)
368 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
372 tmpfd = open(TMPFILE, "r")
373 jsondict = json.load(tmpfd)
377 journal_errors = check_journal(jsondict)
378 if journal_errors is not 0:
379 logging.error(jsondict)
380 ERRORS += journal_errors
# Module-level environment setup: resolve CEPH_BUILD_DIR / CEPH_BIN /
# CEPH_ROOT / CEPH_LIB from the environment, defaulting to a layout rooted
# at the current working directory, and export the results for child
# processes.  Then derive the per-run cluster dir and conf path.
384 CEPH_BUILD_DIR = os.environ.get('CEPH_BUILD_DIR')
385 CEPH_BIN = os.environ.get('CEPH_BIN')
386 CEPH_ROOT = os.environ.get('CEPH_ROOT')
388 if not CEPH_BUILD_DIR:
389 CEPH_BUILD_DIR=os.getcwd()
390 os.putenv('CEPH_BUILD_DIR', CEPH_BUILD_DIR)
391 CEPH_BIN=os.path.join(CEPH_BUILD_DIR, 'bin')
392 os.putenv('CEPH_BIN', CEPH_BIN)
393 CEPH_ROOT=os.path.dirname(CEPH_BUILD_DIR)
394 os.putenv('CEPH_ROOT', CEPH_ROOT)
395 CEPH_LIB=os.path.join(CEPH_BUILD_DIR, 'lib')
396 os.putenv('CEPH_LIB', CEPH_LIB)
# NOTE(review): lines 397-400 are missing -- presumably a try/mkdir whose
# except branch is the `pass` below.
401 pass # ok if this is already there
402 CEPH_DIR = os.path.join(CEPH_BUILD_DIR, os.path.join("td", "cot_dir"))
403 CEPH_CONF = os.path.join(CEPH_DIR, 'ceph.conf')
# kill_daemons presumably lives around lines 404-405 (missing); this stops
# the cluster started from CEPH_CONF.
406 call("{path}/init-ceph -c {conf} stop > /dev/null 2>&1".format(conf=CEPH_CONF, path=CEPH_BIN), shell=True)
# Verify on-disk object files after an import: for every reference file in
# DATADIR (names like "<nspace>-<name>__<clone>"), locate every replica
# under OSDDIR with `find` and diff each against the reference.  Returns
# (error count, replica count).
# NOTE(review): ERRORS/repcount initialization and several control-flow
# lines (410-411, 417-419, 423, 425, 427-428, 432, 436-437, 439, 441, 443)
# are missing from this capture.
409 def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME):
412 for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(SPLIT_NAME) == 0]:
413 nsfile = rawnsfile.split("__")[0]
414 clone = rawnsfile.split("__")[1]
415 nspace = nsfile.split("-")[0]
416 file = nsfile.split("-")[1] + "__" + clone
420 path = os.path.join(DATADIR, rawnsfile)
421 tmpfd = open(TMPFILE, "wb")
422 cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace)
424 ret = call(cmd, shell=True, stdout=tmpfd)
426 logging.critical("INTERNAL ERROR")
429 obj_locs = get_lines(TMPFILE)
430 if len(obj_locs) == 0:
431 logging.error("Can't find imported object {name}".format(name=file))
433 for obj_loc in obj_locs:
434 # For btrfs skip snap_* dirs
435 if re.search("/snap_[0-9]*/", obj_loc) is not None:
438 cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc)
440 ret = call(cmd, shell=True)
442 logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc))
444 return ERRORS, repcount
# Exercise get-osdmap/set-osdmap: extract the newest osdmap from the OSD
# store at `osd_path`, export its crush map, reweight each osd in `osd_ids`
# with crushtool, re-import the crush map, then write the osdmap back with
# `--op set-osdmap --force` (after a --dry-run pass).
# NOTE(review): error-checking branches and the return value (lines 454,
# 459-463, 467, 470, 472, 476, 479-483, 488-489, 495+) are missing from
# this capture.
447 def set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
448 # change the weight of osd.0 to math.pi in the newest osdmap of given osd
449 osdmap_file = tempfile.NamedTemporaryFile(delete=True)
450 cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
451 osdmap_file=osdmap_file.name)
452 output = check_output(cmd, shell=True)
# The tool prints the epoch as "...#<epoch>"; grab the first match.
453 epoch = int(re.findall('#(\d+)', output)[0])
455 new_crush_file = tempfile.NamedTemporaryFile(delete=True)
456 old_crush_file = tempfile.NamedTemporaryFile(delete=True)
457 ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
458 crush_file=old_crush_file.name, path=CEPH_BIN),
# Reweight each osd in turn, ping-ponging between the two temp crush files.
464 for osd_id in osd_ids:
465 cmd = "{path}/crushtool -i {crush_file} --reweight-item osd.{osd} {weight} -o {new_crush_file}".format(osd=osd_id,
466 crush_file=old_crush_file.name,
468 new_crush_file=new_crush_file.name, path=CEPH_BIN)
469 ret = call(cmd, stdout=DEVNULL, shell=True)
471 old_crush_file, new_crush_file = new_crush_file, old_crush_file
473 # change them back, since we don't need to preapre for another round
474 old_crush_file, new_crush_file = new_crush_file, old_crush_file
475 old_crush_file.close()
477 ret = call("{path}/osdmaptool --import-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
478 crush_file=new_crush_file.name, path=CEPH_BIN),
484 # Minimum test of --dry-run by using it, but not checking anything
485 cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
486 cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
487 ret = call(cmd, stdout=DEVNULL, shell=True)
490 # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool
491 # to use use a different epoch than the one in osdmap
492 cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
493 cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
494 ret = call(cmd, stdout=DEVNULL, shell=True)
# Read back the crush item weights for `osd_ids` from the osdmap stored in
# the OSD at `osd_path`: get-osdmap -> export-crush -> `crushtool --tree`,
# then parse the per-osd weight column from the last len(osd_ids) lines.
# NOTE(review): error-check branches, the weights-list init and the return
# (lines 503-504, 511-513, 516-518, 520, 522-523, 526+) are missing from
# this capture.
498 def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path):
499 osdmap_file = tempfile.NamedTemporaryFile(delete=True)
500 cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
501 osdmap_file=osdmap_file.name)
502 ret = call(cmd, stdout=DEVNULL, shell=True)
505 # we have to read the weights from the crush map, even we can query the weights using
506 # osdmaptool, but please keep in mind, they are different:
507 # item weights in crush map versus weight associated with each osd in osdmap
508 crush_file = tempfile.NamedTemporaryFile(delete=True)
509 ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
510 crush_file=crush_file.name, path=CEPH_BIN),
514 output = check_output("{path}/crushtool --tree -i {crush_file} | tail -n {num_osd}".format(crush_file=crush_file.name,
515 num_osd=len(osd_ids), path=CEPH_BIN),
# Each tree line splits into columns; column 2 is the weight.
519 for line in output.strip().split('\n'):
521 linev = re.split('\s+', line)
524 print('linev %s' % linev)
525 weights.append(float(linev[2]))
# End-to-end osdmap round-trip test: set every osd's weight to 1/e in each
# store via set_osd_weight(), then read the weights back with
# get_osd_weights() and verify they match within 1e-5.
# NOTE(review): errors accumulation, the else/continue structure and the
# return (lines 532-533, 535, 539, 542-544, 547-549, 552+) are missing from
# this capture.
530 def test_get_set_osdmap(CFSD_PREFIX, osd_ids, osd_paths):
531 print("Testing get-osdmap and set-osdmap")
534 weight = 1 / math.e # just some magic number in [0, 1]
536 for osd_path in osd_paths:
537 if set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
538 changed.append(osd_path)
540 logging.warning("Failed to change the weights: {0}".format(osd_path))
541 # i am pissed off if none of the store gets changed
545 for osd_path in changed:
546 weights = get_osd_weights(CFSD_PREFIX, osd_ids, osd_path)
550 if any(abs(w - weight) > 1e-5 for w in weights):
551 logging.warning("Weight is not changed: {0} != {1}".format(weights, weight))
# Round-trip test for incremental osdmaps: fetch the newest incremental
# (e2), back up the target epoch's incremental (e1), overwrite e1 with e2,
# verify a --dry-run restore does NOT take effect, read the epoch back and
# confirm it now matches e2, then restore e1 from the backup.
# NOTE(review): error-check branches, the errors counter and the return
# (lines 559, 562, 566, 570, 574, 578-579, 583-585, 588-589, 593, 595+) are
# missing from this capture.
555 def test_get_set_inc_osdmap(CFSD_PREFIX, osd_path):
556 # incrementals are not used unless we need to build an MOSDMap to update
557 # OSD's peers, so an obvious way to test it is simply overwrite an epoch
558 # with a different copy, and read it back to see if it matches.
560 file_e2 = tempfile.NamedTemporaryFile(delete=True)
561 cmd = (CFSD_PREFIX + "--op get-inc-osdmap --file {file}").format(osd=osd_path,
563 output = check_output(cmd, shell=True)
564 epoch = int(re.findall('#(\d+)', output)[0])
565 # backup e1 incremental before overwriting it
567 file_e1_backup = tempfile.NamedTemporaryFile(delete=True)
568 cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
569 ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
571 # overwrite e1 with e2
572 cmd = CFSD_PREFIX + "--op set-inc-osdmap --force --epoch {epoch} --file {file}"
573 ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e2.name), shell=True)
575 # Use dry-run to set back to e1 which shouldn't happen
576 cmd = CFSD_PREFIX + "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file}"
577 ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
580 file_e1_read = tempfile.NamedTemporaryFile(delete=True)
581 cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
582 ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_read.name), shell=True)
# Byte-for-byte comparison: the read-back map must equal what we wrote.
586 if not filecmp.cmp(file_e2.name, file_e1_read.name, shallow=False):
587 logging.error("{{get,set}}-inc-osdmap mismatch {0} != {1}".format(file_e2.name, file_e1_read.name))
590 # revert the change with file_e1_backup
591 cmd = CFSD_PREFIX + "--op set-inc-osdmap --epoch {epoch} --file {file}"
592 ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
594 logging.error("Failed to revert the changed inc-osdmap")
# Test `remove` / `removeall`: for every recorded object, on every OSD that
# holds its PG, verify that plain `remove` refuses when snapshots exist,
# that --force and --dry-run variants behave, that `removeall` succeeds,
# and that `--op list` afterwards finds nothing.  Finally remove snap1 via
# rados rmsnap.
# NOTE(review): the errors counter, several loop headers (e.g. the per-pg /
# per-osd `for` lines), fd closes and the return (lines 601, 604, 606, 610,
# 612, 616-618, 622, 624, 626, 628-629, 631, 633, 635-636, 638, 640,
# 642-643, 646, 648, 650-651, 653, 655-657, 659, 661, 663+) are missing
# from this capture.
600 def test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS):
602 TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
603 nullfd = open(os.devnull, "w")
605 print("Test removeall")
607 for nspace in db.keys():
608 for basename in db[nspace].keys():
609 JSON = db[nspace][basename]['json']
611 OSDS = get_osds(pg, OSDDIR)
613 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
614 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
615 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
# Only the first NUM_CLONED_REP_OBJECTS objects have snapshots/clones.
619 if int(basename.split(REP_NAME)[1]) <= int(NUM_CLONED_REP_OBJECTS):
620 cmd = (CFSD_PREFIX + "'{json}' remove").format(osd=osd, json=JSON)
621 errors += test_failure(cmd, "Snapshots are present, use removeall to delete everything")
623 cmd = (CFSD_PREFIX + " --force --dry-run '{json}' remove").format(osd=osd, json=JSON)
625 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
627 logging.error("remove with --force failed for {json}".format(json=JSON))
630 cmd = (CFSD_PREFIX + " --dry-run '{json}' removeall").format(osd=osd, json=JSON)
632 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
634 logging.error("removeall failed for {json}".format(json=JSON))
637 cmd = (CFSD_PREFIX + " '{json}' removeall").format(osd=osd, json=JSON)
639 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
641 logging.error("removeall failed for {json}".format(json=JSON))
# After removeall, listing the object must return nothing.
644 tmpfd = open(TMPFILE, "w")
645 cmd = (CFSD_PREFIX + "--op list --pgid {pg} --namespace {ns} {name}").format(osd=osd, pg=pg, ns=nspace, name=basename)
647 ret = call(cmd, shell=True, stdout=tmpfd)
649 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
652 lines = get_lines(TMPFILE)
654 logging.error("Removeall didn't remove all objects {ns}/{name} : {lines}".format(ns=nspace, name=basename, lines=lines))
658 cmd = "{path}/rados -p {pool} rmsnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
660 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
662 logging.error("rados rmsnap failed")
# --- main() body, part 1: runtime setup and test-size parameters. ---
# NOTE(review): the enclosing `def main(argv):` line (~668) is missing from
# this capture, as are many statements in between (the embedded source line
# numbers are non-contiguous throughout this region).
# On py2, reopen stdout unbuffered in binary; on py3 use the buffer attr.
670 if sys.version_info[0] < 3:
671 sys.stdout = stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
673 stdout = sys.stdout.buffer
674 if len(argv) > 1 and argv[1] == "debug":
# Recreate the scratch cluster directory from scratch for this run.
679 call("rm -fr {dir}; mkdir -p {dir}".format(dir=CEPH_DIR), shell=True)
681 os.environ["CEPH_DIR"] = CEPH_DIR
683 REP_POOL = "rep_pool"
684 REP_NAME = "REPobject"
# "large" mode scales up object counts and data sizes.
687 if len(argv) > 0 and argv[0] == 'large':
689 NUM_REP_OBJECTS = 800
690 NUM_CLONED_REP_OBJECTS = 100
693 # Larger data sets for first object per namespace
694 DATALINECOUNT = 50000
695 # Number of objects to do xattr/omap testing on
# Default (small) mode values; most small-mode constants (696-699, 701-702,
# 704, 706+) are missing from this capture.
700 NUM_CLONED_REP_OBJECTS = 2
703 # Larger data sets for first object per namespace
705 # Number of objects to do xattr/omap testing on
# --- main() body, part 2: scratch dirs, cluster start, and pool creation.
# NOTE(review): numerous statements in this region (713, 715-717, 719, 721,
# 723, 725, 727, 730, 733, 736, 738, 741, 743-745) are missing from this
# capture -- among them, presumably, the vstart() call and EC constants.
709 TESTDIR = "/tmp/test.{pid}".format(pid=pid)
710 DATADIR = "/tmp/data.{pid}".format(pid=pid)
# All ceph-objectstore-tool invocations are built from this prefix with a
# per-call {osd} substitution.
711 CFSD_PREFIX = CEPH_BIN + "/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} "
712 PROFNAME = "testecprofile"
714 os.environ['CEPH_CONF'] = CEPH_CONF
718 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=REP_POOL, pg=PG_COUNT, path=CEPH_BIN)
720 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
722 REPID = get_pool_id(REP_POOL, nullfd)
724 print("Created Replicated pool #{repid}".format(repid=REPID))
# Erasure-coded pool with an osd failure domain profile.
726 cmd = "{path}/ceph osd erasure-code-profile set {prof} crush-failure-domain=osd".format(prof=PROFNAME, path=CEPH_BIN)
728 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
729 cmd = "{path}/ceph osd erasure-code-profile get {prof}".format(prof=PROFNAME, path=CEPH_BIN)
731 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
732 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} erasure {prof}".format(pool=EC_POOL, prof=PROFNAME, pg=PG_COUNT, path=CEPH_BIN)
734 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
735 ECID = get_pool_id(EC_POOL, nullfd)
737 print("Created Erasure coded pool #{ecid}".format(ecid=ECID))
739 print("Creating {objs} objects in replicated pool".format(objs=(NUM_REP_OBJECTS*NUM_NSPACES)))
740 cmd = "mkdir -p {datadir}".format(datadir=DATADIR)
742 call(cmd, shell=True)
# --- main() body, part 3: populate the replicated pool. ---
# For each namespace and object index, write a reference data file under
# DATADIR, `rados put` it, and record expected xattrs / omap header / omap
# values into the `db` dict keyed db[nspace][NAME].
# NOTE(review): loop headers and several statements (748, 750-753, 757-758,
# 760, 762-763, 765-766, 769-772, 774, 776, 778-779, 781, 783-785,
# 787-789, 793, 795, 797, 799, 804, 806, 808, 810, 812-814, 818, 820,
# 823-824, 826, 828) are missing from this capture -- including the
# `for i in objects:` / `for n in nspaces:` headers and data-writing loops.
746 objects = range(1, NUM_REP_OBJECTS + 1)
747 nspaces = range(NUM_NSPACES)
749 nspace = get_nspace(n)
754 NAME = REP_NAME + "{num}".format(num=i)
755 LNAME = nspace + "-" + NAME
756 DDNAME = os.path.join(DATADIR, LNAME)
759 cmd = "rm -f " + DDNAME
761 call(cmd, shell=True)
764 dataline = range(DATALINECOUNT)
767 fd = open(DDNAME, "w")
768 data = "This is the replicated data for " + LNAME + "\n"
773 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
775 ret = call(cmd, shell=True, stderr=nullfd)
777 logging.critical("Rados put command failed with {ret}".format(ret=ret))
780 db[nspace][NAME] = {}
# Only the first ATTR_OBJS objects per namespace get xattrs.
782 if i < ATTR_OBJS + 1:
786 db[nspace][NAME]["xattr"] = {}
790 mykey = "key{i}-{k}".format(i=i, k=k)
791 myval = "val{i}-{k}".format(i=i, k=k)
792 cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
794 ret = call(cmd, shell=True)
796 logging.error("setxattr failed with {ret}".format(ret=ret))
798 db[nspace][NAME]["xattr"][mykey] = myval
800 # Create omap header in all objects but REPobject1
801 if i < ATTR_OBJS + 1 and i != 1:
802 myhdr = "hdr{i}".format(i=i)
803 cmd = "{path}/rados -p {pool} -N '{nspace}' setomapheader {name} {hdr}".format(pool=REP_POOL, name=NAME, hdr=myhdr, nspace=nspace, path=CEPH_BIN)
805 ret = call(cmd, shell=True)
807 logging.critical("setomapheader failed with {ret}".format(ret=ret))
809 db[nspace][NAME]["omapheader"] = myhdr
811 db[nspace][NAME]["omap"] = {}
815 mykey = "okey{i}-{k}".format(i=i, k=k)
816 myval = "oval{i}-{k}".format(i=i, k=k)
817 cmd = "{path}/rados -p {pool} -N '{nspace}' setomapval {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
819 ret = call(cmd, shell=True)
821 logging.critical("setomapval failed with {ret}".format(ret=ret))
822 db[nspace][NAME]["omap"][mykey] = myval
# Take a pool snapshot so the first objects gain clones when overwritten.
825 cmd = "{path}/rados -p {pool} mksnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
827 call(cmd, shell=True)
# --- main() body, part 4: overwrite the first NUM_CLONED_REP_OBJECTS
# objects after the snapshot, so each gains a clone.  The pre-snapshot
# reference file is preserved as "<name>__1" (the clone) and a fresh
# post-snapshot data file is written and `rados put`.
# NOTE(review): loop headers and several statements (831, 833-834, 838,
# 840-841, 843, 845-846, 848-849, 852-855, 857, 859, 861-862) are missing
# from this capture.
829 objects = range(1, NUM_CLONED_REP_OBJECTS + 1)
830 nspaces = range(NUM_NSPACES)
832 nspace = get_nspace(n)
835 NAME = REP_NAME + "{num}".format(num=i)
836 LNAME = nspace + "-" + NAME
837 DDNAME = os.path.join(DATADIR, LNAME)
839 CLONENAME = DDNAME + "__1"
842 cmd = "mv -f " + DDNAME + " " + CLONENAME
844 call(cmd, shell=True)
847 dataline = range(DATALINECOUNT)
850 fd = open(DDNAME, "w")
851 data = "This is the replicated data after a snapshot for " + LNAME + "\n"
856 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
858 ret = call(cmd, shell=True, stderr=nullfd)
860 logging.critical("Rados put command failed with {ret}".format(ret=ret))
# --- main() body, part 5: populate the erasure-coded pool, mirroring the
# replicated-pool loop.  Only xattrs are recorded; the omap dict is left
# empty because omap isn't supported in EC pools (per the comment on 917).
# NOTE(review): loop headers and several statements (864, 867, 869-870,
# 874-875, 877, 879-880, 882-883, 886-889, 891, 893, 895-896, 898,
# 901-906, 910, 912, 914, 916, 919+) are missing from this capture.
863 print("Creating {objs} objects in erasure coded pool".format(objs=(NUM_EC_OBJECTS*NUM_NSPACES)))
865 objects = range(1, NUM_EC_OBJECTS + 1)
866 nspaces = range(NUM_NSPACES)
868 nspace = get_nspace(n)
871 NAME = EC_NAME + "{num}".format(num=i)
872 LNAME = nspace + "-" + NAME
873 DDNAME = os.path.join(DATADIR, LNAME)
876 cmd = "rm -f " + DDNAME
878 call(cmd, shell=True)
881 dataline = range(DATALINECOUNT)
884 fd = open(DDNAME, "w")
885 data = "This is the erasure coded data for " + LNAME + "\n"
890 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=EC_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
892 ret = call(cmd, shell=True, stderr=nullfd)
894 logging.critical("Erasure coded pool creation failed with {ret}".format(ret=ret))
897 db[nspace][NAME] = {}
899 db[nspace][NAME]["xattr"] = {}
900 if i < ATTR_OBJS + 1:
907 mykey = "key{i}-{k}".format(i=i, k=k)
908 myval = "val{i}-{k}".format(i=i, k=k)
909 cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=EC_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
911 ret = call(cmd, shell=True)
913 logging.error("setxattr failed with {ret}".format(ret=ret))
915 db[nspace][NAME]["xattr"][mykey] = myval
917 # Omap isn't supported in EC pools
918 db[nspace][NAME]["omap"] = {}
928 ALLREPPGS = get_pgs(OSDDIR, REPID)
929 logging.debug(ALLREPPGS)
930 ALLECPGS = get_pgs(OSDDIR, ECID)
931 logging.debug(ALLECPGS)
933 OBJREPPGS = get_objs(ALLREPPGS, REP_NAME, OSDDIR, REPID)
934 logging.debug(OBJREPPGS)
935 OBJECPGS = get_objs(ALLECPGS, EC_NAME, OSDDIR, ECID)
936 logging.debug(OBJECPGS)
940 osds = get_osds(ONEPG, OSDDIR)
942 logging.debug(ONEOSD)
# --- main() body, part 7: negative tests -- every cmd here is expected to
# FAIL with a specific message, checked via test_failure() which returns an
# error count accumulated into ERRORS.
# NOTE(review): scattered lines are missing throughout (948, 952, 956,
# 959, 961, 966-968, 972, 974, 978, 982, 987-988, 991, 994, 997, 1000,
# 1003, 1007, 1011, 1016, 1020, 1023, 1026, 1030, 1034, 1038, 1042, 1046,
# 1049, 1052, 1055, 1058, 1061, 1064, 1067) -- including the ONEECOSD
# selection and an rm of OTHERFILE.
944 print("Test invalid parameters")
945 # On export can't use stdout to a terminal
946 cmd = (CFSD_PREFIX + "--op export --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
947 ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)
949 # On export can't use stdout to a terminal
950 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
951 ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)
953 # Prep a valid ec export file for import failure tests
954 ONEECPG = ALLECPGS[0]
955 osds = get_osds(ONEECPG, OSDDIR)
957 OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
958 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=ONEECPG, file=OTHERFILE)
960 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
962 # On import can't specify a different shard
963 BADPG = ONEECPG.split('s')[0] + "s10"
964 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=BADPG, file=OTHERFILE)
965 ERRORS += test_failure(cmd, "Can't specify a different shard, must be")
969 # Prep a valid export file for import failure tests
970 OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
971 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
973 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
975 # On import can't specify a PG with a non-existent pool
976 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg="10.0", file=OTHERFILE)
977 ERRORS += test_failure(cmd, "Can't specify a different pgid pool, must be")
979 # On import can't specify shard for a replicated export
980 cmd = (CFSD_PREFIX + "--op import --pgid {pg}s0 --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
981 ERRORS += test_failure(cmd, "Can't specify a sharded pgid with a non-sharded export")
983 # On import can't specify a PG with a bad seed
984 TMPPG="{pool}.80".format(pool=REPID)
985 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg=TMPPG, file=OTHERFILE)
986 ERRORS += test_failure(cmd, "Illegal pgid, the seed is larger than current pg_num")
# Missing import file must be reported by path.
989 cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE)
990 ERRORS += test_failure(cmd, "file: {FOO}: No such file or directory".format(FOO=OTHERFILE))
992 cmd = "{path}/ceph-objectstore-tool --data-path BAD_DATA_PATH --op list".format(osd=ONEOSD, path=CEPH_BIN)
993 ERRORS += test_failure(cmd, "data-path: BAD_DATA_PATH: No such file or directory")
995 cmd = "{path}/ceph-objectstore-tool --journal-path BAD_JOURNAL_PATH --op dump-journal".format(path=CEPH_BIN)
996 ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: (2) No such file or directory")
998 cmd = (CFSD_PREFIX + "--journal-path BAD_JOURNAL_PATH --op list").format(osd=ONEOSD)
999 ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: No such file or directory")
1001 cmd = (CFSD_PREFIX + "--journal-path /bin --op list").format(osd=ONEOSD)
1002 ERRORS += test_failure(cmd, "journal-path: /bin: (21) Is a directory")
1004 # On import can't use stdin from a terminal
1005 cmd = (CFSD_PREFIX + "--op import --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
1006 ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)
1008 # On import can't use stdin from a terminal
1009 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
1010 ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)
1012 # Specify a bad --type
1013 os.mkdir(OSDDIR + "/fakeosd")
1014 cmd = ("{path}/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} --type foobar --op list --pgid {pg}").format(osd="fakeosd", pg=ONEPG, path=CEPH_BIN)
1015 ERRORS += test_failure(cmd, "Unable to create store of type foobar")
1017 # Don't specify a data-path
1018 cmd = "{path}/ceph-objectstore-tool --type memstore --op list --pgid {pg}".format(dir=OSDDIR, osd=ONEOSD, pg=ONEPG, path=CEPH_BIN)
1019 ERRORS += test_failure(cmd, "Must provide --data-path")
1021 cmd = (CFSD_PREFIX + "--op remove --pgid 2.0").format(osd=ONEOSD)
1022 ERRORS += test_failure(cmd, "Please use export-remove or you must use --force option")
1024 cmd = (CFSD_PREFIX + "--force --op remove").format(osd=ONEOSD)
1025 ERRORS += test_failure(cmd, "Must provide pgid")
1027 # Don't secify a --op nor object command
1028 cmd = CFSD_PREFIX.format(osd=ONEOSD)
1029 ERRORS += test_failure(cmd, "Must provide --op or object command...")
1031 # Specify a bad --op command
1032 cmd = (CFSD_PREFIX + "--op oops").format(osd=ONEOSD)
1033 ERRORS += test_failure(cmd, "Must provide --op (info, log, remove, mkfs, fsck, export, export-remove, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete)")
1035 # Provide just the object param not a command
1036 cmd = (CFSD_PREFIX + "object").format(osd=ONEOSD)
1037 ERRORS += test_failure(cmd, "Invalid syntax, missing command")
1039 # Provide an object name that doesn't exist
1040 cmd = (CFSD_PREFIX + "NON_OBJECT get-bytes").format(osd=ONEOSD)
1041 ERRORS += test_failure(cmd, "No object id 'NON_OBJECT' found")
1043 # Provide an invalid object command
1044 cmd = (CFSD_PREFIX + "--pgid {pg} '' notacommand").format(osd=ONEOSD, pg=ONEPG)
1045 ERRORS += test_failure(cmd, "Unknown object command 'notacommand'")
1047 cmd = (CFSD_PREFIX + "foo list-omap").format(osd=ONEOSD, pg=ONEPG)
1048 ERRORS += test_failure(cmd, "No object id 'foo' found or invalid JSON specified")
1050 cmd = (CFSD_PREFIX + "'{{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}}' list-omap").format(osd=ONEOSD, pg=ONEPG)
1051 ERRORS += test_failure(cmd, "Without --pgid the object '{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}' must be a JSON array")
1053 cmd = (CFSD_PREFIX + "'[]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1054 ERRORS += test_failure(cmd, "Object '[]' must be a JSON array with 2 elements")
1056 cmd = (CFSD_PREFIX + "'[\"1.0\"]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1057 ERRORS += test_failure(cmd, "Object '[\"1.0\"]' must be a JSON array with 2 elements")
1059 cmd = (CFSD_PREFIX + "'[\"1.0\", 5, 8, 9]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1060 ERRORS += test_failure(cmd, "Object '[\"1.0\", 5, 8, 9]' must be a JSON array with 2 elements")
1062 cmd = (CFSD_PREFIX + "'[1, 2]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1063 ERRORS += test_failure(cmd, "Object '[1, 2]' must be a JSON array with the first element a string")
1065 cmd = (CFSD_PREFIX + "'[\"1.3\",{{\"snapid\":\"not an int\"}}]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1066 ERRORS += test_failure(cmd, "Decode object JSON error: value type is 2 not 4")
1070 OSDS = get_osds(ALLPGS[0], OSDDIR)
1073 print("Test all --op dump-journal")
1074 ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
1075 ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)
1077 # Test --op list and generate json for all objects
1078 print("Test --op list variants")
1080 # retrieve all objects from all PGs
1081 tmpfd = open(TMPFILE, "wb")
1082 cmd = (CFSD_PREFIX + "--op list --format json").format(osd=osd)
1084 ret = call(cmd, shell=True, stdout=tmpfd)
1086 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1089 lines = get_lines(TMPFILE)
1090 JSONOBJ = sorted(set(lines))
1091 (pgid, coll, jsondict) = json.loads(JSONOBJ[0])[0]
1093 # retrieve all objects in a given PG
1094 tmpfd = open(OTHERFILE, "ab")
1095 cmd = (CFSD_PREFIX + "--op list --pgid {pg} --format json").format(osd=osd, pg=pgid)
1097 ret = call(cmd, shell=True, stdout=tmpfd)
1099 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1102 lines = get_lines(OTHERFILE)
1103 JSONOBJ = sorted(set(lines))
1104 (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0]
1106 if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
1107 logging.error("the first line of --op list is different "
1108 "from the first line of --op list --pgid {pg}".format(pg=pgid))
1111 # retrieve all objects with a given name in a given PG
1112 tmpfd = open(OTHERFILE, "wb")
1113 cmd = (CFSD_PREFIX + "--op list --pgid {pg} {object} --format json").format(osd=osd, pg=pgid, object=jsondict['oid'])
1115 ret = call(cmd, shell=True, stdout=tmpfd)
1117 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1120 lines = get_lines(OTHERFILE)
1121 JSONOBJ = sorted(set(lines))
1122 (other_pgid, other_coll, other_jsondict) in json.loads(JSONOBJ[0])[0]
1124 if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
1125 logging.error("the first line of --op list is different "
1126 "from the first line of --op list --pgid {pg} {object}".format(pg=pgid, object=jsondict['oid']))
1129 print("Test --op list by generating json for all objects using default format")
1131 OSDS = get_osds(pg, OSDDIR)
1133 tmpfd = open(TMPFILE, "ab")
1134 cmd = (CFSD_PREFIX + "--op list --pgid {pg}").format(osd=osd, pg=pg)
1136 ret = call(cmd, shell=True, stdout=tmpfd)
1138 logging.error("Bad exit status {ret} from --op list request".format(ret=ret))
1142 lines = get_lines(TMPFILE)
1143 JSONOBJ = sorted(set(lines))
1144 for JSON in JSONOBJ:
1145 (pgid, jsondict) = json.loads(JSON)
1146 # Skip clones for now
1147 if jsondict['snapid'] != -2:
1149 db[jsondict['namespace']][jsondict['oid']]['json'] = json.dumps((pgid, jsondict))
1150 # print db[jsondict['namespace']][jsondict['oid']]['json']
1151 if jsondict['oid'].find(EC_NAME) == 0 and 'shard_id' not in jsondict:
1152 logging.error("Malformed JSON {json}".format(json=JSON))
1156 print("Test get-bytes and set-bytes")
1157 for nspace in db.keys():
1158 for basename in db[nspace].keys():
1159 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1160 JSON = db[nspace][basename]['json']
1161 GETNAME = "/tmp/getbytes.{pid}".format(pid=pid)
1162 TESTNAME = "/tmp/testbytes.{pid}".format(pid=pid)
1163 SETNAME = "/tmp/setbytes.{pid}".format(pid=pid)
1164 BADNAME = "/tmp/badbytes.{pid}".format(pid=pid)
1165 for pg in OBJREPPGS:
1166 OSDS = get_osds(pg, OSDDIR)
1168 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1169 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1170 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1177 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-bytes {fname}").format(osd=osd, pg=pg, json=JSON, fname=GETNAME)
1179 ret = call(cmd, shell=True)
1181 logging.error("Bad exit status {ret}".format(ret=ret))
1184 cmd = "diff -q {file} {getfile}".format(file=file, getfile=GETNAME)
1185 ret = call(cmd, shell=True)
1187 logging.error("Data from get-bytes differ")
1188 logging.debug("Got:")
1189 cat_file(logging.DEBUG, GETNAME)
1190 logging.debug("Expected:")
1191 cat_file(logging.DEBUG, file)
1193 fd = open(SETNAME, "w")
1194 data = "put-bytes going into {file}\n".format(file=file)
1197 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=SETNAME)
1199 ret = call(cmd, shell=True)
1201 logging.error("Bad exit status {ret} from set-bytes".format(ret=ret))
1203 fd = open(TESTNAME, "wb")
1204 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
1206 ret = call(cmd, shell=True, stdout=fd)
1209 logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
1211 cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
1213 ret = call(cmd, shell=True)
1215 logging.error("Data after set-bytes differ")
1216 logging.debug("Got:")
1217 cat_file(logging.DEBUG, TESTNAME)
1218 logging.debug("Expected:")
1219 cat_file(logging.DEBUG, SETNAME)
1222 # Use set-bytes with --dry-run and make sure contents haven't changed
1223 fd = open(BADNAME, "w")
1224 data = "Bad data for --dry-run in {file}\n".format(file=file)
1227 cmd = (CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=BADNAME)
1229 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1231 logging.error("Bad exit status {ret} from set-bytes --dry-run".format(ret=ret))
1233 fd = open(TESTNAME, "wb")
1234 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
1236 ret = call(cmd, shell=True, stdout=fd)
1239 logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
1241 cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
1243 ret = call(cmd, shell=True)
1245 logging.error("Data after set-bytes --dry-run changed!")
1246 logging.debug("Got:")
1247 cat_file(logging.DEBUG, TESTNAME)
1248 logging.debug("Expected:")
1249 cat_file(logging.DEBUG, SETNAME)
1252 fd = open(file, "rb")
1253 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes").format(osd=osd, pg=pg, json=JSON)
1255 ret = call(cmd, shell=True, stdin=fd)
1257 logging.error("Bad exit status {ret} from set-bytes to restore object".format(ret=ret))
1278 # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
1279 print("Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap")
1280 for nspace in db.keys():
1281 for basename in db[nspace].keys():
1282 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1283 JSON = db[nspace][basename]['json']
1284 for pg in OBJREPPGS:
1285 OSDS = get_osds(pg, OSDDIR)
1287 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1288 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1289 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1292 for key, val in db[nspace][basename]["xattr"].items():
1294 cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key=attrkey)
1296 getval = check_output(cmd, shell=True)
1298 logging.error("get-attr of key {key} returned wrong val: {get} instead of {orig}".format(key=attrkey, get=getval, orig=val))
1301 # set-attr to bogus value "foobar"
1302 cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1304 ret = call(cmd, shell=True)
1306 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1309 # Test set-attr with dry-run
1310 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1312 ret = call(cmd, shell=True, stdout=nullfd)
1314 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1317 # Check the set-attr
1318 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1320 getval = check_output(cmd, shell=True)
1322 logging.error("Bad exit status {ret} from get-attr".format(ret=ret))
1325 if getval != "foobar":
1326 logging.error("Check of set-attr failed because we got {val}".format(val=getval))
1330 cmd = (CFSD_PREFIX + "'{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1332 ret = call(cmd, shell=True)
1334 logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
1337 # Check rm-attr with dry-run
1338 cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1340 ret = call(cmd, shell=True, stdout=nullfd)
1342 logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
1345 cmd = (CFSD_PREFIX + "'{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1347 ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
1349 logging.error("For rm-attr expect get-attr to fail, but it succeeded")
1352 cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey, val=val)
1354 ret = call(cmd, shell=True)
1356 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1360 hdr = db[nspace][basename].get("omapheader", "")
1361 cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, json=JSON)
1363 gethdr = check_output(cmd, shell=True)
1365 logging.error("get-omaphdr was wrong: {get} instead of {orig}".format(get=gethdr, orig=hdr))
1368 # set-omaphdr to bogus value "foobar"
1369 cmd = ("echo -n foobar | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1371 ret = call(cmd, shell=True)
1373 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1376 # Check the set-omaphdr
1377 cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, pg=pg, json=JSON)
1379 gethdr = check_output(cmd, shell=True)
1381 logging.error("Bad exit status {ret} from get-omaphdr".format(ret=ret))
1384 if gethdr != "foobar":
1385 logging.error("Check of set-omaphdr failed because we got {val}".format(val=getval))
1388 # Test dry-run with set-omaphdr
1389 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1391 ret = call(cmd, shell=True, stdout=nullfd)
1393 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1397 cmd = ("echo -n {val} | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON, val=hdr)
1399 ret = call(cmd, shell=True)
1401 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1405 for omapkey, val in db[nspace][basename]["omap"].items():
1406 cmd = (CFSD_PREFIX + " '{json}' get-omap {key}").format(osd=osd, json=JSON, key=omapkey)
1408 getval = check_output(cmd, shell=True)
1410 logging.error("get-omap of key {key} returned wrong val: {get} instead of {orig}".format(key=omapkey, get=getval, orig=val))
1413 # set-omap to bogus value "foobar"
1414 cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1416 ret = call(cmd, shell=True)
1418 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1421 # Check set-omap with dry-run
1422 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1424 ret = call(cmd, shell=True, stdout=nullfd)
1426 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1429 # Check the set-omap
1430 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1432 getval = check_output(cmd, shell=True)
1434 logging.error("Bad exit status {ret} from get-omap".format(ret=ret))
1437 if getval != "foobar":
1438 logging.error("Check of set-omap failed because we got {val}".format(val=getval))
1442 cmd = (CFSD_PREFIX + "'{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1444 ret = call(cmd, shell=True)
1446 logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
1448 # Check rm-omap with dry-run
1449 cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1451 ret = call(cmd, shell=True, stdout=nullfd)
1453 logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
1455 cmd = (CFSD_PREFIX + "'{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1457 ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
1459 logging.error("For rm-omap expect get-omap to fail, but it succeeded")
1462 cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey, val=val)
1464 ret = call(cmd, shell=True)
1466 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1472 for nspace in db.keys():
1473 for basename in db[nspace].keys():
1474 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1475 JSON = db[nspace][basename]['json']
1476 GETNAME = "/tmp/getbytes.{pid}".format(pid=pid)
1477 for pg in OBJREPPGS:
1478 OSDS = get_osds(pg, OSDDIR)
1480 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1481 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1482 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1485 if int(basename.split(REP_NAME)[1]) > int(NUM_CLONED_REP_OBJECTS):
1487 cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"snap\": 1,' > /dev/null").format(osd=osd, json=JSON)
1489 ret = call(cmd, shell=True)
1491 logging.error("Invalid dump for {json}".format(json=JSON))
1494 print("Test list-attrs get-attr")
1495 ATTRFILE = r"/tmp/attrs.{pid}".format(pid=pid)
1496 VALFILE = r"/tmp/val.{pid}".format(pid=pid)
1497 for nspace in db.keys():
1498 for basename in db[nspace].keys():
1499 file = os.path.join(DATADIR, nspace + "-" + basename)
1500 JSON = db[nspace][basename]['json']
1501 jsondict = json.loads(JSON)
1503 if 'shard_id' in jsondict:
1504 logging.debug("ECobject " + JSON)
1507 OSDS = get_osds(pg, OSDDIR)
1508 # Fix shard_id since we only have one json instance for each object
1509 jsondict['shard_id'] = int(pg.split('s')[1])
1510 JSON = json.dumps(jsondict)
1512 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-attr hinfo_key").format(osd=osd, pg=pg, json=JSON)
1513 logging.debug("TRY: " + cmd)
1515 out = check_output(cmd, shell=True, stderr=subprocess.STDOUT)
1516 logging.debug("FOUND: {json} in {osd} has value '{val}'".format(osd=osd, json=JSON, val=out))
1518 except subprocess.CalledProcessError as e:
1519 if "No such file or directory" not in e.output and "No data available" not in e.output:
1521 # Assuming k=2 m=1 for the default ec pool
1523 logging.error("{json} hinfo_key found {found} times instead of 3".format(json=JSON, found=found))
1527 # Make sure rep obj with rep pg or ec obj with ec pg
1528 if ('shard_id' in jsondict) != (pg.find('s') > 0):
1530 if 'shard_id' in jsondict:
1531 # Fix shard_id since we only have one json instance for each object
1532 jsondict['shard_id'] = int(pg.split('s')[1])
1533 JSON = json.dumps(jsondict)
1534 OSDS = get_osds(pg, OSDDIR)
1536 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1537 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1538 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1541 afd = open(ATTRFILE, "wb")
1542 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' list-attrs").format(osd=osd, pg=pg, json=JSON)
1544 ret = call(cmd, shell=True, stdout=afd)
1547 logging.error("list-attrs failed with {ret}".format(ret=ret))
1550 keys = get_lines(ATTRFILE)
1551 values = dict(db[nspace][basename]["xattr"])
1553 if key == "_" or key == "snapset" or key == "hinfo_key":
1555 key = key.strip("_")
1556 if key not in values:
1557 logging.error("Unexpected key {key} present".format(key=key))
1560 exp = values.pop(key)
1561 vfd = open(VALFILE, "wb")
1562 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key="_" + key)
1564 ret = call(cmd, shell=True, stdout=vfd)
1567 logging.error("get-attr failed with {ret}".format(ret=ret))
1570 lines = get_lines(VALFILE)
1573 logging.error("For key {key} got value {got} instead of {expected}".format(key=key, got=val, expected=exp))
1575 if len(values) != 0:
1576 logging.error("Not all keys found, remaining keys:")
1579 print("Test --op meta-list")
1580 tmpfd = open(TMPFILE, "wb")
1581 cmd = (CFSD_PREFIX + "--op meta-list").format(osd=ONEOSD)
1583 ret = call(cmd, shell=True, stdout=tmpfd)
1585 logging.error("Bad exit status {ret} from --op meta-list request".format(ret=ret))
1588 print("Test get-bytes on meta")
1590 lines = get_lines(TMPFILE)
1591 JSONOBJ = sorted(set(lines))
1592 for JSON in JSONOBJ:
1593 (pgid, jsondict) = json.loads(JSON)
1595 logging.error("pgid incorrect for --op meta-list {pgid}".format(pgid=pgid))
1597 if jsondict['namespace'] != "":
1598 logging.error("namespace non null --op meta-list {ns}".format(ns=jsondict['namespace']))
1605 cmd = (CFSD_PREFIX + "'{json}' get-bytes {fname}").format(osd=ONEOSD, json=JSON, fname=GETNAME)
1607 ret = call(cmd, shell=True)
1609 logging.error("Bad exit status {ret}".format(ret=ret))
1621 print("Test pg info")
1622 for pg in ALLREPPGS + ALLECPGS:
1623 for osd in get_osds(pg, OSDDIR):
1624 cmd = (CFSD_PREFIX + "--op info --pgid {pg} | grep '\"pgid\": \"{pg}\"'").format(osd=osd, pg=pg)
1626 ret = call(cmd, shell=True, stdout=nullfd)
1628 logging.error("Getting info failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1631 print("Test pg logging")
1632 if len(ALLREPPGS + ALLECPGS) == len(OBJREPPGS + OBJECPGS):
1633 logging.warning("All PGs have objects, so no log without modify entries")
1634 for pg in ALLREPPGS + ALLECPGS:
1635 for osd in get_osds(pg, OSDDIR):
1636 tmpfd = open(TMPFILE, "wb")
1637 cmd = (CFSD_PREFIX + "--op log --pgid {pg}").format(osd=osd, pg=pg)
1639 ret = call(cmd, shell=True, stdout=tmpfd)
1641 logging.error("Getting log failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1643 HASOBJ = pg in OBJREPPGS + OBJECPGS
1645 for line in get_lines(TMPFILE):
1646 if line.find("modify") != -1:
1649 if HASOBJ != MODOBJ:
1650 logging.error("Bad log for pg {pg} from {osd}".format(pg=pg, osd=osd))
1651 MSG = (HASOBJ and [""] or ["NOT "])[0]
1652 print("Log should {msg}have a modify entry".format(msg=MSG))
1660 print("Test list-pgs")
1661 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1663 CHECK_PGS = get_osd_pgs(os.path.join(OSDDIR, osd), None)
1664 CHECK_PGS = sorted(CHECK_PGS)
1666 cmd = (CFSD_PREFIX + "--op list-pgs").format(osd=osd)
1668 TEST_PGS = check_output(cmd, shell=True).split("\n")
1669 TEST_PGS = sorted(TEST_PGS)[1:] # Skip extra blank line
1671 if TEST_PGS != CHECK_PGS:
1672 logging.error("list-pgs got wrong result for osd.{osd}".format(osd=osd))
1673 logging.error("Expected {pgs}".format(pgs=CHECK_PGS))
1674 logging.error("Got {pgs}".format(pgs=TEST_PGS))
1678 print("Test pg export --dry-run")
1680 osd = get_osds(pg, OSDDIR)[0]
1681 fname = "/tmp/fname.{pid}".format(pid=pid)
1682 cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1684 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1686 logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1688 elif os.path.exists(fname):
1689 logging.error("Exporting --dry-run created file")
1692 cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1694 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1696 logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1699 outdata = get_lines(fname)
1700 if len(outdata) > 0:
1701 logging.error("Exporting --dry-run to stdout not empty")
1702 logging.error("Data: " + outdata)
1706 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1707 os.mkdir(os.path.join(TESTDIR, osd))
1708 print("Test pg export")
1709 for pg in ALLREPPGS + ALLECPGS:
1710 for osd in get_osds(pg, OSDDIR):
1711 mydir = os.path.join(TESTDIR, osd)
1712 fname = os.path.join(mydir, pg)
1713 if pg == ALLREPPGS[0]:
1714 cmd = (CFSD_PREFIX + "--op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1715 elif pg == ALLREPPGS[1]:
1716 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file - > {file}").format(osd=osd, pg=pg, file=fname)
1718 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1720 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1722 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1725 ERRORS += EXP_ERRORS
1727 print("Test pg removal")
1729 for pg in ALLREPPGS + ALLECPGS:
1730 for osd in get_osds(pg, OSDDIR):
1731 # This should do nothing
1732 cmd = (CFSD_PREFIX + "--op remove --pgid {pg} --dry-run").format(pg=pg, osd=osd)
1734 ret = call(cmd, shell=True, stdout=nullfd)
1736 logging.error("Removing --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1738 cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
1740 ret = call(cmd, shell=True, stdout=nullfd)
1742 logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1748 if EXP_ERRORS == 0 and RM_ERRORS == 0:
1749 print("Test pg import")
1750 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1751 dir = os.path.join(TESTDIR, osd)
1752 PGS = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
1754 file = os.path.join(dir, pg)
1755 # This should do nothing
1756 cmd = (CFSD_PREFIX + "--op import --file {file} --dry-run").format(osd=osd, file=file)
1758 ret = call(cmd, shell=True, stdout=nullfd)
1760 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1763 cmd = ("cat {file} |".format(file=file) + CFSD_PREFIX + "--op import").format(osd=osd)
1765 cmd = (CFSD_PREFIX + "--op import --file - --pgid {pg} < {file}").format(osd=osd, file=file, pg=pg)
1767 cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file)
1769 ret = call(cmd, shell=True, stdout=nullfd)
1771 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1774 logging.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
1776 ERRORS += IMP_ERRORS
1779 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1780 print("Verify replicated import data")
1781 data_errors, _ = check_data(DATADIR, TMPFILE, OSDDIR, REP_NAME)
1782 ERRORS += data_errors
1784 logging.warning("SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES")
1786 print("Test all --op dump-journal again")
1787 ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
1788 ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)
1793 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1794 print("Verify erasure coded import data")
1795 ERRORS += verify(DATADIR, EC_POOL, EC_NAME, db)
1796 # Check replicated data/xattr/omap using rados
1797 print("Verify replicated import data using rados")
1798 ERRORS += verify(DATADIR, REP_POOL, REP_NAME, db)
1801 NEWPOOL = "rados-import-pool"
1802 cmd = "{path}/rados mkpool {pool}".format(pool=NEWPOOL, path=CEPH_BIN)
1804 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1806 print("Test rados import")
1808 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1809 dir = os.path.join(TESTDIR, osd)
1810 for pg in [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]:
1811 if pg.find("{id}.".format(id=REPID)) != 0:
1813 file = os.path.join(dir, pg)
1816 # This should do nothing
1817 cmd = "{path}/rados import -p {pool} --dry-run {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1819 ret = call(cmd, shell=True, stdout=nullfd)
1821 logging.error("Rados import --dry-run failed from {file} with {ret}".format(file=file, ret=ret))
1823 cmd = "{path}/rados -p {pool} ls".format(pool=NEWPOOL, path=CEPH_BIN)
1825 data = check_output(cmd, shell=True)
1827 logging.error("'{data}'".format(data=data))
1828 logging.error("Found objects after dry-run")
1830 cmd = "{path}/rados import -p {pool} {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1832 ret = call(cmd, shell=True, stdout=nullfd)
1834 logging.error("Rados import failed from {file} with {ret}".format(file=file, ret=ret))
1836 cmd = "{path}/rados import -p {pool} --no-overwrite {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1838 ret = call(cmd, shell=True, stdout=nullfd)
1840 logging.error("Rados import --no-overwrite failed from {file} with {ret}".format(file=file, ret=ret))
1843 ERRORS += verify(DATADIR, NEWPOOL, REP_NAME, db)
1845 logging.warning("SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES")
1847 # Clear directories of previous portion
1848 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
1849 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
1853 # Cause SPLIT_POOL to split and test import with object/log filtering
1854 print("Testing import all objects after a split")
1855 SPLIT_POOL = "split_pool"
1858 SPLIT_NSPACE_COUNT = 2
1859 SPLIT_NAME = "split"
1860 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT, path=CEPH_BIN)
1862 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1863 SPLITID = get_pool_id(SPLIT_POOL, nullfd)
1864 pool_size = int(check_output("{path}/ceph osd pool get {pool} size".format(pool=SPLIT_POOL, path=CEPH_BIN), shell=True, stderr=nullfd).split(" ")[1])
1869 objects = range(1, SPLIT_OBJ_COUNT + 1)
1870 nspaces = range(SPLIT_NSPACE_COUNT)
1872 nspace = get_nspace(n)
1875 NAME = SPLIT_NAME + "{num}".format(num=i)
1876 LNAME = nspace + "-" + NAME
1877 DDNAME = os.path.join(DATADIR, LNAME)
1880 cmd = "rm -f " + DDNAME
1882 call(cmd, shell=True)
1885 dataline = range(DATALINECOUNT)
1888 fd = open(DDNAME, "w")
1889 data = "This is the split data for " + LNAME + "\n"
1894 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
1896 ret = call(cmd, shell=True, stderr=nullfd)
1898 logging.critical("Rados put command failed with {ret}".format(ret=ret))
1904 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1905 os.mkdir(os.path.join(TESTDIR, osd))
1907 pg = "{pool}.0".format(pool=SPLITID)
1910 export_osds = get_osds(pg, OSDDIR)
1911 for osd in export_osds:
1912 mydir = os.path.join(TESTDIR, osd)
1913 fname = os.path.join(mydir, pg)
1914 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1916 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1918 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1921 ERRORS += EXP_ERRORS
1927 cmd = "{path}/ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL, path=CEPH_BIN)
1929 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1935 # Now 2 PGs, poolid.0 and poolid.1
1936 for seed in range(2):
1937 pg = "{pool}.{seed}".format(pool=SPLITID, seed=seed)
1940 for osd in get_osds(pg, OSDDIR):
1941 cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
1943 ret = call(cmd, shell=True, stdout=nullfd)
1945 # This is weird. The export files are based on only the EXPORT_PG
1946 # and where that pg was before the split. Use 'which' to use all
1947 # export copies in import.
1948 mydir = os.path.join(TESTDIR, export_osds[which])
1949 fname = os.path.join(mydir, EXPORT_PG)
1951 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1953 ret = call(cmd, shell=True, stdout=nullfd)
1955 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1958 ERRORS += IMP_ERRORS
1960 # Start up again to make sure imports didn't corrupt anything
1962 print("Verify split import data")
1963 data_errors, count = check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME)
1964 ERRORS += data_errors
1965 if count != (SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size):
1966 logging.error("Incorrect number of replicas seen {count}".format(count=count))
1971 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
1972 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
1974 ERRORS += test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS)
1976 # vstart() starts 4 OSDs
1977 ERRORS += test_get_set_osdmap(CFSD_PREFIX, list(range(4)), ALLOSDS)
1978 ERRORS += test_get_set_inc_osdmap(CFSD_PREFIX, ALLOSDS[0])
1981 CORES = [f for f in os.listdir(CEPH_DIR) if f.startswith("core.")]
1983 CORE_DIR = os.path.join("/tmp", "cores.{pid}".format(pid=os.getpid()))
1985 call("/bin/mv {ceph_dir}/core.* {core_dir}".format(ceph_dir=CEPH_DIR, core_dir=CORE_DIR), shell=True)
1986 logging.error("Failure due to cores found")
1987 logging.error("See {core_dir} for cores".format(core_dir=CORE_DIR))
1988 ERRORS += len(CORES)
1991 print("TEST PASSED")
1994 print("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))
def remove_btrfs_subvolumes(path):
    """Delete every btrfs subvolume nested under *path*.

    No-op on FreeBSD (no btrfs there).  If *path* does not live on a
    btrfs filesystem nothing is removed.  Deletion shells out to
    ``sudo btrfs``, matching the shell-based helpers used elsewhere in
    this script.
    """
    if platform.system() == "FreeBSD":
        return
    # Ask stat for the filesystem type holding 'path' ('%%T' -> literal %T
    # after %-formatting).
    result = subprocess.Popen("stat -f -c '%%T' %s" % path, shell=True, stdout=subprocess.PIPE)
    # Initialize so an empty stat output cannot raise NameError below.
    filesystem = None
    for line in result.stdout:
        filesystem = decode(line).rstrip('\n')
    result.wait()  # reap the child so it does not linger as a zombie
    if filesystem == "btrfs":
        result = subprocess.Popen("sudo btrfs subvolume list %s" % path, shell=True, stdout=subprocess.PIPE)
        for line in result.stdout:
            # 9th whitespace-separated field of 'btrfs subvolume list'
            # output is the subvolume path.
            subvolume = decode(line).split()[8]
            # extracting the relative volume name
            m = re.search(".*(%s.*)" % path, subvolume)
            if m:
                found = m.group(1)
                call("sudo btrfs subvolume delete %s" % found, shell=True)
        result.wait()  # reap the listing child as well
2015 if __name__ == "__main__":
2018 status = main(sys.argv[1:])
2021 os.chdir(CEPH_BUILD_DIR)
2022 remove_btrfs_subvolumes(CEPH_DIR)
2023 call("/bin/rm -fr {dir}".format(dir=CEPH_DIR), shell=True)