Fix some bugs when testing opensds ansible
[stor4nfv.git] / src / ceph / qa / standalone / special / ceph_objectstore_tool.py
1 #!/usr/bin/env python
2
3 from __future__ import print_function
4 from subprocess import call
try:
    from subprocess import check_output
except ImportError:
    # Python < 2.7 has no subprocess.check_output; provide the 2.7
    # implementation so the rest of the script can use it unconditionally.
    def check_output(*popenargs, **kwargs):
        import subprocess
        # backported from python 2.7 stdlib
        process = subprocess.Popen(
            stdout=subprocess.PIPE, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            error = subprocess.CalledProcessError(retcode, cmd)
            # attach captured stdout, like the 2.7 original
            error.output = output
            raise error
        return output
23
24 import filecmp
25 import os
26 import subprocess
27 import math
28 import time
29 import sys
30 import re
31 import logging
32 import json
33 import tempfile
34 import platform
35
# Python < 3.3 has no subprocess.DEVNULL; emulate it with a handle on
# os.devnull that stays open for the life of the process.
try:
    from subprocess import DEVNULL
except ImportError:
    DEVNULL = open(os.devnull, "wb")

logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
42
43
if sys.version_info[0] < 3:
    def decode(s):
        # Python 2: command output is already a str; pass it through.
        return s
else:
    def decode(s):
        # Python 3: command output arrives as bytes; convert to text.
        return s.decode('utf-8')

    def check_output(*args, **kwargs):
        # Shadow the imported check_output so callers always receive str.
        return decode(subprocess.check_output(*args, **kwargs))
54
55
def wait_for_health():
    """Poll `ceph health` once a second until it reports OK or WARN.

    Raises Exception if the cluster has not become healthy after 150 tries.
    Fix: the grep pattern is now a raw string — `\|` is a shell-grep
    alternation, and in a plain Python string it is an invalid escape
    (DeprecationWarning on Python 3.6+).
    """
    print("Wait for health_ok...", end="")
    tries = 0
    while call(r"{path}/ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null".format(path=CEPH_BIN), shell=True) == 0:
        tries += 1
        if tries == 150:
            raise Exception("Time exceeded to go to health")
        time.sleep(1)
    print("DONE")
65
66
def get_pool_id(name, nullfd):
    """Return the pool id (as a string) from `ceph osd pool stats <name>`."""
    stats_cmd = "{path}/ceph osd pool stats {pool}".format(pool=name, path=CEPH_BIN)
    # Output begins "pool <name> id <N> ..."; the id is the 4th field.
    fields = check_output(stats_cmd.split(), stderr=nullfd).split()
    return fields[3]
71
72
# return a list of unique PGS given an osd subdirectory
def get_osd_pgs(SUBDIR, ID):
    """Return the PG ids found under SUBDIR/current.

    SUBDIR is an osd data directory. When ID is not None, only PGs whose
    directory name starts with that pool id are returned. PG directories
    are named "<pgid>_head"; the suffix is stripped from the result.

    Fix: the filter guard was `if ID:` while the comprehension tested
    `ID is None` — a falsy-but-not-None ID (e.g. "") raised NameError on
    the undefined `endhead`. Both now consistently test against None.
    """
    PGS = []
    if ID is not None:
        endhead = re.compile("{id}.*_head$".format(id=ID))
    DIR = os.path.join(SUBDIR, "current")
    PGS += [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and (ID is None or endhead.match(f))]
    PGS = [re.sub("_head", "", p) for p in PGS if "_head" in p]
    return PGS
82
83
# return a sorted list of unique PGs given a directory
def get_pgs(DIR, ID):
    """Collect the sorted, de-duplicated PG ids across every osd* dir in DIR."""
    pgs = []
    for entry in os.listdir(DIR):
        subdir = os.path.join(DIR, entry)
        if entry.startswith("osd") and os.path.isdir(subdir):
            pgs.extend(get_osd_pgs(subdir, ID))
    return sorted(set(pgs))
92
93
# return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified
def get_objs(ALLPGS, prefix, DIR, ID):
    """Return the sorted subset of ALLPGS whose on-disk head directory,
    in any osd* dir under DIR, contains at least one object file whose
    name starts with `prefix`. (ID is accepted for signature parity but
    is not used by this function.)
    """
    matches = []
    osd_dirs = [d for d in os.listdir(DIR)
                if d.startswith("osd") and os.path.isdir(os.path.join(DIR, d))]
    for osd in osd_dirs:
        current = os.path.join(DIR, osd, "current")
        for pg in ALLPGS:
            pgdir = os.path.join(current, pg + "_head")
            if not os.path.isdir(pgdir):
                continue
            # Walk the PG directory looking for any object with the prefix.
            hit = False
            for _, _, files in os.walk(pgdir):
                if any(name.startswith(prefix) for name in files):
                    hit = True
                    break
            if hit:
                matches.append(pg)
    return sorted(set(matches))
110
111
# return a sorted list of OSDS which have data from a given PG
def get_osds(PG, DIR):
    """Return the sorted names of osd* dirs in DIR that hold a head dir for PG."""
    holders = []
    for d in os.listdir(DIR):
        if not (d.startswith("osd") and os.path.isdir(os.path.join(DIR, d))):
            continue
        head = os.path.join(DIR, d, "current", PG + "_head")
        if os.path.isdir(head):
            holders.append(d)
    return sorted(holders)
124
125
def get_lines(filename):
    """Return every non-empty line of `filename`, then delete the file.

    Fix: the original readline loop exited at the first empty line,
    silently dropping everything after an interior blank line; all
    non-empty lines are now collected. The file handle is also managed
    with a context manager instead of a manual close.
    """
    with open(filename, "r") as tmpfd:
        lines = [line.rstrip('\n') for line in tmpfd if line.rstrip('\n')]
    os.unlink(filename)
    return lines
137
138
def cat_file(level, filename):
    """Print the contents of `filename` when `level` meets the root
    logger's effective level; otherwise do nothing.

    Fix: the original loop broke at the first empty line, truncating the
    dump of any file containing a blank line; the whole file is printed now.
    """
    if level < logging.getLogger().getEffectiveLevel():
        return
    print("File: " + filename)
    with open(filename, "r") as f:
        for line in f:
            print(line.rstrip('\n'))
    print("<EOF>")
150
151
def vstart(new, opt=""):
    """(Re)start a small vstart.sh test cluster; `new` wipes old data (-n)."""
    print("vstarting....", end="")
    flag = "-n" if new else "-N"
    cmd = ("MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 "
           "{path}/src/vstart.sh --filestore --short -l {new} -d {opt} "
           "> /dev/null 2>&1").format(new=flag, opt=opt, path=CEPH_ROOT)
    call(cmd, shell=True)
    print("DONE")
157
158
def test_failure(cmd, errmsg, tty=False):
    """Run `cmd` expecting it to FAIL with `errmsg` on stderr.

    Returns 0 when the command exits non-zero and some stderr line
    contains `errmsg`; returns 1 otherwise. With tty=True the command's
    stdin/stdout are wired to /dev/tty; if the tty cannot be opened the
    test is skipped (returns 0).
    """
    if tty:
        try:
            # NOTE(review): "rwb" is not a valid open() mode on Python 3,
            # so there this always raises and the tty variant is skipped.
            ttyfd = open("/dev/tty", "rwb")
        except Exception as e:
            logging.info(str(e))
            logging.info("SKIP " + cmd)
            return 0
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    tmpfd = open(TMPFILE, "wb")

    logging.debug(cmd)
    if tty:
        ret = call(cmd, shell=True, stdin=ttyfd, stdout=ttyfd, stderr=tmpfd)
        ttyfd.close()
    else:
        ret = call(cmd, shell=True, stderr=tmpfd)
    tmpfd.close()
    if ret == 0:
        # A successful exit is itself a failure of this negative test.
        logging.error(cmd)
        logging.error("Should have failed, but got exit 0")
        return 1
    # get_lines also deletes TMPFILE.
    lines = get_lines(TMPFILE)
    matched = [ l for l in lines if errmsg in l ]
    if any(matched):
        logging.info("Correctly failed with message \"" + matched[0] + "\"")
        return 0
    else:
        logging.error("Command: " + cmd )
        logging.error("Bad messages to stderr \"" + str(lines) + "\"")
        logging.error("Expected \"" + errmsg + "\"")
        return 1
191
192
def get_nspace(num):
    """Map a namespace index to its name; index 0 is the default ("") namespace."""
    return "" if num == 0 else "ns{num}".format(num=num)
197
198
def verify(DATADIR, POOL, NAME_PREFIX, db):
    """Compare objects in `POOL` against the reference copies in DATADIR.

    For every head object file "<nspace>-<name>__head" under DATADIR whose
    name starts with NAME_PREFIX, fetch the object data, each recorded
    xattr, the omap header and every omap value via `rados`, and compare
    them to the expected values recorded in `db`. Returns the number of
    mismatches/errors found.
    """
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    ERRORS = 0
    for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(NAME_PREFIX) == 0]:
        # File names look like "<nspace>-<name>__<clone>".
        nsfile = rawnsfile.split("__")[0]
        clone = rawnsfile.split("__")[1]
        nspace = nsfile.split("-")[0]
        file = nsfile.split("-")[1]
        # Skip clones
        if clone != "head":
            continue
        path = os.path.join(DATADIR, rawnsfile)
        try:
            os.unlink(TMPFILE)
        except:
            pass
        # 1. Object data must round-trip through `rados get`.
        cmd = "{path}/rados -p {pool} -N '{nspace}' get {file} {out}".format(pool=POOL, file=file, out=TMPFILE, nspace=nspace, path=CEPH_BIN)
        logging.debug(cmd)
        call(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL)
        cmd = "diff -q {src} {result}".format(src=path, result=TMPFILE)
        logging.debug(cmd)
        ret = call(cmd, shell=True)
        if ret != 0:
            logging.error("{file} data not imported properly".format(file=file))
            ERRORS += 1
        try:
            os.unlink(TMPFILE)
        except:
            pass
        # 2. Every recorded xattr must match.
        for key, val in db[nspace][file]["xattr"].items():
            cmd = "{path}/rados -p {pool} -N '{nspace}' getxattr {name} {key}".format(pool=POOL, name=file, key=key, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            getval = check_output(cmd, shell=True, stderr=DEVNULL)
            logging.debug("getxattr {key} {val}".format(key=key, val=getval))
            if getval != val:
                logging.error("getxattr of key {key} returned wrong val: {get} instead of {orig}".format(key=key, get=getval, orig=val))
                ERRORS += 1
                continue
        # 3. The omap header must match; an absent header compares as "".
        hdr = db[nspace][file].get("omapheader", "")
        cmd = "{path}/rados -p {pool} -N '{nspace}' getomapheader {name} {file}".format(pool=POOL, name=file, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stderr=DEVNULL)
        if ret != 0:
            logging.error("rados getomapheader returned {ret}".format(ret=ret))
            ERRORS += 1
        else:
            # get_lines deletes TMPFILE after reading it.
            getlines = get_lines(TMPFILE)
            assert(len(getlines) == 0 or len(getlines) == 1)
            if len(getlines) == 0:
                gethdr = ""
            else:
                gethdr = getlines[0]
            logging.debug("header: {hdr}".format(hdr=gethdr))
            if gethdr != hdr:
                logging.error("getomapheader returned wrong val: {get} instead of {orig}".format(get=gethdr, orig=hdr))
                ERRORS += 1
        # 4. Every recorded omap key/value must match.
        for key, val in db[nspace][file]["omap"].items():
            cmd = "{path}/rados -p {pool} -N '{nspace}' getomapval {name} {key} {file}".format(pool=POOL, name=file, key=key, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=DEVNULL)
            if ret != 0:
                logging.error("getomapval returned {ret}".format(ret=ret))
                ERRORS += 1
                continue
            getlines = get_lines(TMPFILE)
            if len(getlines) != 1:
                logging.error("Bad data from getomapval {lines}".format(lines=getlines))
                ERRORS += 1
                continue
            getval = getlines[0]
            logging.debug("getomapval {key} {val}".format(key=key, val=getval))
            if getval != val:
                logging.error("getomapval returned wrong val: {get} instead of {orig}".format(get=getval, orig=val))
                ERRORS += 1
        try:
            os.unlink(TMPFILE)
        except:
            pass
    return ERRORS
278
279
def check_journal(jsondict):
    """Sanity-check `--op dump-journal` JSON output.

    Verifies the header (with max_size) and the entries list are present;
    entry-level validation is delegated to check_journal_entries().
    Returns the number of problems found.
    """
    errors = 0
    if 'header' in jsondict:
        hdr = jsondict['header']
        if 'max_size' in hdr:
            print("\tJournal max_size = {size}".format(size=hdr['max_size']))
        else:
            logging.error("Key 'max_size' not in dump-journal header")
            errors += 1
    else:
        logging.error("Key 'header' not in dump-journal")
        errors += 1
    if 'entries' in jsondict:
        entries = jsondict['entries']
        if len(entries) == 0:
            logging.info("No entries in journal found")
        else:
            errors += check_journal_entries(entries)
    else:
        logging.error("Key 'entries' not in dump-journal output")
        errors += 1
    return errors
298
299
def check_journal_entries(entries):
    """Check each journal entry for its required keys; return the error count."""
    errors = 0
    for enum, entry in enumerate(entries):
        # Every entry must carry an offset and a sequence number.
        if 'offset' not in entry:
            logging.error("No 'offset' key in entry {e}".format(e=enum))
            errors += 1
        if 'seq' not in entry:
            logging.error("No 'seq' key in entry {e}".format(e=enum))
            errors += 1
        if 'transactions' not in entry:
            logging.error("No 'transactions' key in entry {e}".format(e=enum))
            errors += 1
        elif len(entry['transactions']) == 0:
            logging.error("No transactions found in entry {e}".format(e=enum))
            errors += 1
        else:
            errors += check_entry_transactions(entry, enum)
    return errors
318
319
def check_entry_transactions(entry, enum):
    """Validate the transactions of one journal entry; return the error count."""
    errors = 0
    for tnum, txn in enumerate(entry['transactions']):
        # trans_num must exist and equal the transaction's position.
        if 'trans_num' not in txn:
            logging.error("Key 'trans_num' missing from entry {e} trans {t}".format(e=enum, t=tnum))
            errors += 1
        elif txn['trans_num'] != tnum:
            ft = txn['trans_num']
            logging.error("Bad trans_num ({ft}) entry {e} trans {t}".format(ft=ft, e=enum, t=tnum))
            errors += 1
        if 'ops' not in txn:
            logging.error("Key 'ops' missing from entry {e} trans {t}".format(e=enum, t=tnum))
            errors += 1
        else:
            errors += check_transaction_ops(txn['ops'], enum, tnum)
    return errors
336
337
def check_transaction_ops(ops, enum, tnum):
    """Validate the op records of one transaction; return the error count.

    Each op must carry an 'op_name' and an 'op_num' equal to its position.
    Fix: the original used `len(ops) is 0` — an identity comparison with
    an int literal, which is a SyntaxWarning on modern CPython and not
    guaranteed by the language; use an equality test.
    """
    if len(ops) == 0:
        logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum))
    errors = 0
    for onum in range(len(ops)):
        if 'op_num' not in ops[onum]:
            logging.error("Key 'op_num' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
            errors += 1
        elif ops[onum]['op_num'] != onum:
            fo = ops[onum]['op_num']
            logging.error("Bad op_num ({fo}) from entry {e} trans {t} op {o}".format(fo=fo, e=enum, t=tnum, o=onum))
            errors += 1
        if 'op_name' not in ops[onum]:
            logging.error("Key 'op_name' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
            errors += 1
    return errors
354
355
def test_dump_journal(CFSD_PREFIX, osds):
    """Run `--op dump-journal --format json` on each osd, parse the JSON
    and validate it with check_journal(). Returns the error count.

    Fixes: the temp file is now closed (and removed) on the error path —
    it previously leaked an open handle and a stale /tmp file — and the
    error count is compared with `!=` instead of the unreliable identity
    test `is not 0`.
    """
    ERRORS = 0
    pid = os.getpid()
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)

    for osd in osds:
        # Test --op dump-journal by loading json
        cmd = (CFSD_PREFIX + "--op dump-journal --format json").format(osd=osd)
        logging.debug(cmd)
        with open(TMPFILE, "wb") as tmpfd:
            ret = call(cmd, shell=True, stdout=tmpfd)
        if ret != 0:
            logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
            ERRORS += 1
            os.unlink(TMPFILE)
            continue
        with open(TMPFILE, "r") as tmpfd:
            jsondict = json.load(tmpfd)
        os.unlink(TMPFILE)

        journal_errors = check_journal(jsondict)
        if journal_errors != 0:
            logging.error(jsondict)
        ERRORS += journal_errors

    return ERRORS
383
# Resolve the ceph build/binary locations from the environment. When
# CEPH_BUILD_DIR is unset, derive everything from the current directory
# and export the values for child processes (vstart.sh, init-ceph, ...).
CEPH_BUILD_DIR = os.environ.get('CEPH_BUILD_DIR')
CEPH_BIN = os.environ.get('CEPH_BIN')
CEPH_ROOT = os.environ.get('CEPH_ROOT')

if not CEPH_BUILD_DIR:
    CEPH_BUILD_DIR=os.getcwd()
    os.putenv('CEPH_BUILD_DIR', CEPH_BUILD_DIR)
    CEPH_BIN=os.path.join(CEPH_BUILD_DIR, 'bin')
    os.putenv('CEPH_BIN', CEPH_BIN)
    # The source tree (for vstart.sh) is assumed to be the parent dir.
    CEPH_ROOT=os.path.dirname(CEPH_BUILD_DIR)
    os.putenv('CEPH_ROOT', CEPH_ROOT)
    CEPH_LIB=os.path.join(CEPH_BUILD_DIR, 'lib')
    os.putenv('CEPH_LIB', CEPH_LIB)

try:
    os.mkdir("td")
except:
    pass # ok if this is already there
# All test cluster state lives under td/cot_dir in the build directory.
CEPH_DIR = os.path.join(CEPH_BUILD_DIR, os.path.join("td", "cot_dir"))
CEPH_CONF = os.path.join(CEPH_DIR, 'ceph.conf')
404
def kill_daemons():
    """Stop all daemons of the test cluster via init-ceph; output is discarded."""
    stop_cmd = "{path}/init-ceph -c {conf} stop > /dev/null 2>&1".format(conf=CEPH_CONF, path=CEPH_BIN)
    call(stop_cmd, shell=True)
407
408
def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME):
    """Verify imported object data on disk against the source files.

    For each head object file in DATADIR whose name matches SPLIT_NAME,
    locate every on-disk replica under OSDDIR with `find` and diff it
    against the source copy. Returns (error_count, replica_count) on the
    normal path, or the bare int 1 on internal error — callers must
    special-case that.
    """
    repcount = 0
    ERRORS = 0
    for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(SPLIT_NAME) == 0]:
        # File names look like "<nspace>-<name>__<clone>".
        nsfile = rawnsfile.split("__")[0]
        clone = rawnsfile.split("__")[1]
        nspace = nsfile.split("-")[0]
        file = nsfile.split("-")[1] + "__" + clone
        # Skip clones
        if clone != "head":
            continue
        path = os.path.join(DATADIR, rawnsfile)
        tmpfd = open(TMPFILE, "wb")
        # Collect the on-disk locations of this object across all osds.
        cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stdout=tmpfd)
        if ret:
            logging.critical("INTERNAL ERROR")
            return 1
        tmpfd.close()
        # get_lines also deletes TMPFILE.
        obj_locs = get_lines(TMPFILE)
        if len(obj_locs) == 0:
            logging.error("Can't find imported object {name}".format(name=file))
            ERRORS += 1
        for obj_loc in obj_locs:
            # For btrfs skip snap_* dirs
            if re.search("/snap_[0-9]*/", obj_loc) is not None:
                continue
            repcount += 1
            cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc)
            logging.debug(cmd)
            ret = call(cmd, shell=True)
            if ret != 0:
                logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc))
                ERRORS += 1
    return ERRORS, repcount
445
446
def set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
    """Rewrite the crush weight of every osd in `osd_ids` inside the newest
    osdmap stored at `osd_path`, exercising --op get-osdmap / set-osdmap
    (including a --dry-run pass). Returns True when the final set-osdmap
    succeeds.
    """
    # change the weight of osd.0 to math.pi in the newest osdmap of given osd
    osdmap_file = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
                                                                        osdmap_file=osdmap_file.name)
    output = check_output(cmd, shell=True)
    # The tool reports the epoch as "...#<epoch>"; recover it for set-osdmap.
    epoch = int(re.findall('#(\d+)', output)[0])

    new_crush_file = tempfile.NamedTemporaryFile(delete=True)
    old_crush_file = tempfile.NamedTemporaryFile(delete=True)
    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                          crush_file=old_crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               stderr=DEVNULL,
               shell=True)
    assert(ret == 0)

    # Reweight each osd in turn, ping-ponging between the two temp files so
    # the output of one crushtool run is the input of the next.
    for osd_id in osd_ids:
        cmd = "{path}/crushtool -i {crush_file} --reweight-item osd.{osd} {weight} -o {new_crush_file}".format(osd=osd_id,
                                                                                                          crush_file=old_crush_file.name,
                                                                                                          weight=weight,
                                                                                                          new_crush_file=new_crush_file.name, path=CEPH_BIN)
        ret = call(cmd, stdout=DEVNULL, shell=True)
        assert(ret == 0)
        old_crush_file, new_crush_file = new_crush_file, old_crush_file

    # change them back, since we don't need to preapre for another round
    old_crush_file, new_crush_file = new_crush_file, old_crush_file
    old_crush_file.close()

    ret = call("{path}/osdmaptool --import-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                               crush_file=new_crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               stderr=DEVNULL,
               shell=True)
    assert(ret == 0)

    # Minimum test of --dry-run by using it, but not checking anything
    cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
    cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
    ret = call(cmd, stdout=DEVNULL, shell=True)
    assert(ret == 0)

    # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool
    # to use use a different epoch than the one in osdmap
    cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
    cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
    ret = call(cmd, stdout=DEVNULL, shell=True)

    return ret == 0
497
def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path):
    """Read the crush weights of `osd_ids` from `osd_path`'s newest osdmap.

    Returns the list of weights (floats) in crush-tree order, or None when
    the osdmap cannot be extracted.

    Fix: the original tested `linev[0] is ''` — an identity comparison
    against a str literal, which relies on CPython interning and raises a
    SyntaxWarning on modern Python; use `== ''`. The `\s+` pattern is now
    a raw string as well.
    """
    osdmap_file = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
                                                                        osdmap_file=osdmap_file.name)
    ret = call(cmd, stdout=DEVNULL, shell=True)
    if ret != 0:
        return None
    # we have to read the weights from the crush map, even we can query the weights using
    # osdmaptool, but please keep in mind, they are different:
    #    item weights in crush map versus weight associated with each osd in osdmap
    crush_file = tempfile.NamedTemporaryFile(delete=True)
    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                               crush_file=crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               shell=True)
    assert(ret == 0)
    output = check_output("{path}/crushtool --tree -i {crush_file} | tail -n {num_osd}".format(crush_file=crush_file.name,
                                                                                          num_osd=len(osd_ids), path=CEPH_BIN),
                          stderr=DEVNULL,
                          shell=True)
    weights = []
    for line in output.strip().split('\n'):
        print(line)
        linev = re.split(r'\s+', line)
        # crushtool indents its tree output; drop the leading empty field.
        if linev[0] == '':
            linev.pop(0)
        print('linev %s' % linev)
        weights.append(float(linev[2]))

    return weights
528
529
def test_get_set_osdmap(CFSD_PREFIX, osd_ids, osd_paths):
    """Exercise --op get-osdmap/set-osdmap by reweighting the osds in each
    store and reading the weights back. Returns the error count."""
    print("Testing get-osdmap and set-osdmap")
    errors = 0
    kill_daemons()
    weight = 1 / math.e           # just some magic number in [0, 1]
    changed = []
    for osd_path in osd_paths:
        if not set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
            logging.warning("Failed to change the weights: {0}".format(osd_path))
            continue
        changed.append(osd_path)
    # i am pissed off if none of the store gets changed
    if len(changed) == 0:
        errors += 1

    for osd_path in changed:
        weights = get_osd_weights(CFSD_PREFIX, osd_ids, osd_path)
        if not weights:
            errors += 1
            continue
        # every weight read back must match what we set (within float noise)
        if any(abs(w - weight) > 1e-5 for w in weights):
            logging.warning("Weight is not changed: {0} != {1}".format(weights, weight))
            errors += 1
    return errors
554
def test_get_set_inc_osdmap(CFSD_PREFIX, osd_path):
    """Exercise --op get-inc-osdmap / set-inc-osdmap on one osd store.

    Strategy: back up the incremental map of epoch e-1, overwrite it with
    the one from epoch e, read it back and check it matches (a --dry-run
    restore in between must have no effect), then restore the backup.
    Returns the number of errors; 1 on any command failure.
    """
    # incrementals are not used unless we need to build an MOSDMap to update
    # OSD's peers, so an obvious way to test it is simply overwrite an epoch
    # with a different copy, and read it back to see if it matches.
    kill_daemons()
    file_e2 = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-inc-osdmap --file {file}").format(osd=osd_path,
                                                                     file=file_e2.name)
    output = check_output(cmd, shell=True)
    # The tool reports the newest epoch as "...#<epoch>".
    epoch = int(re.findall('#(\d+)', output)[0])
    # backup e1 incremental before overwriting it
    epoch -= 1
    file_e1_backup = tempfile.NamedTemporaryFile(delete=True)
    cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
    if ret: return 1
    # overwrite e1 with e2
    cmd = CFSD_PREFIX + "--op set-inc-osdmap --force --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e2.name), shell=True)
    if ret: return 1
    # Use dry-run to set back to e1 which shouldn't happen
    cmd = CFSD_PREFIX + "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
    if ret: return 1
    # read from e1
    file_e1_read = tempfile.NamedTemporaryFile(delete=True)
    cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_read.name), shell=True)
    if ret: return 1
    errors = 0
    try:
        # Epoch e-1 must now carry e's content (the dry-run must not
        # have undone the overwrite).
        if not filecmp.cmp(file_e2.name, file_e1_read.name, shallow=False):
            logging.error("{{get,set}}-inc-osdmap mismatch {0} != {1}".format(file_e2.name, file_e1_read.name))
            errors += 1
    finally:
        # revert the change with file_e1_backup
        cmd = CFSD_PREFIX + "--op set-inc-osdmap --epoch {epoch} --file {file}"
        ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
        if ret:
            logging.error("Failed to revert the changed inc-osdmap")
            errors += 1

    return errors
598
599
def test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS):
    """Exercise the object 'remove' / 'removeall' commands.

    With the daemons stopped, walk every object recorded in `db`, find its
    on-disk copies per PG/osd, verify that plain 'remove' refuses objects
    that have snapshots, then dry-run and really run 'removeall' and check
    via --op list that the object is gone. Restarts the cluster afterwards
    and drops the test snapshot. Returns the accumulated error count.
    """
    # Test removeall
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    nullfd = open(os.devnull, "w")
    errors=0
    print("Test removeall")
    kill_daemons()
    for nspace in db.keys():
        for basename in db[nspace].keys():
            JSON = db[nspace][basename]['json']
            for pg in OBJREPPGS:
                OSDS = get_osds(pg, OSDDIR)
                for osd in OSDS:
                    DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
                    # On-disk names look like "<basename>_*_*_*_<nspace>..."
                    fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
                              and f.split("_")[0] == basename and f.split("_")[4] == nspace]
                    if not fnames:
                        continue

                    # Objects with clones must reject a plain 'remove'.
                    if int(basename.split(REP_NAME)[1]) <= int(NUM_CLONED_REP_OBJECTS):
                        cmd = (CFSD_PREFIX + "'{json}' remove").format(osd=osd, json=JSON)
                        errors += test_failure(cmd, "Snapshots are present, use removeall to delete everything")

                    cmd = (CFSD_PREFIX + " --force --dry-run '{json}' remove").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("remove with --force failed for {json}".format(json=JSON))
                        errors += 1

                    cmd = (CFSD_PREFIX + " --dry-run '{json}' removeall").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("removeall failed for {json}".format(json=JSON))
                        errors += 1

                    # The real removal.
                    cmd = (CFSD_PREFIX + " '{json}' removeall").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("removeall failed for {json}".format(json=JSON))
                        errors += 1

                    # --op list must no longer report the object.
                    tmpfd = open(TMPFILE, "w")
                    cmd = (CFSD_PREFIX + "--op list --pgid {pg} --namespace {ns} {name}").format(osd=osd, pg=pg, ns=nspace, name=basename)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=tmpfd)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
                        errors += 1
                    tmpfd.close()
                    lines = get_lines(TMPFILE)
                    if len(lines) != 0:
                        logging.error("Removeall didn't remove all objects {ns}/{name} : {lines}".format(ns=nspace, name=basename, lines=lines))
                        errors += 1
    vstart(new=False)
    wait_for_health()
    cmd = "{path}/rados -p {pool} rmsnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    if ret != 0:
        logging.error("rados rmsnap failed")
        errors += 1
    time.sleep(2)
    wait_for_health()
    return errors
667
668
669 def main(argv):
670     if sys.version_info[0] < 3:
671         sys.stdout = stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
672     else:
673         stdout = sys.stdout.buffer
674     if len(argv) > 1 and argv[1] == "debug":
675         nullfd = stdout
676     else:
677         nullfd = DEVNULL
678
679     call("rm -fr {dir}; mkdir -p {dir}".format(dir=CEPH_DIR), shell=True)
680     os.chdir(CEPH_DIR)
681     os.environ["CEPH_DIR"] = CEPH_DIR
682     OSDDIR = "dev"
683     REP_POOL = "rep_pool"
684     REP_NAME = "REPobject"
685     EC_POOL = "ec_pool"
686     EC_NAME = "ECobject"
687     if len(argv) > 0 and argv[0] == 'large':
688         PG_COUNT = 12
689         NUM_REP_OBJECTS = 800
690         NUM_CLONED_REP_OBJECTS = 100
691         NUM_EC_OBJECTS = 12
692         NUM_NSPACES = 4
693         # Larger data sets for first object per namespace
694         DATALINECOUNT = 50000
695         # Number of objects to do xattr/omap testing on
696         ATTR_OBJS = 10
697     else:
698         PG_COUNT = 4
699         NUM_REP_OBJECTS = 2
700         NUM_CLONED_REP_OBJECTS = 2
701         NUM_EC_OBJECTS = 2
702         NUM_NSPACES = 2
703         # Larger data sets for first object per namespace
704         DATALINECOUNT = 10
705         # Number of objects to do xattr/omap testing on
706         ATTR_OBJS = 2
707     ERRORS = 0
708     pid = os.getpid()
709     TESTDIR = "/tmp/test.{pid}".format(pid=pid)
710     DATADIR = "/tmp/data.{pid}".format(pid=pid)
711     CFSD_PREFIX = CEPH_BIN + "/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} "
712     PROFNAME = "testecprofile"
713
    # Bring up a fresh vstart cluster and create one replicated and one
    # erasure-coded pool to exercise the tool against.
    os.environ['CEPH_CONF'] = CEPH_CONF
    vstart(new=True)
    wait_for_health()

    cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=REP_POOL, pg=PG_COUNT, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    # Give the monitors a moment to register the new pool before querying it.
    time.sleep(2)
    REPID = get_pool_id(REP_POOL, nullfd)

    print("Created Replicated pool #{repid}".format(repid=REPID))

    # EC profile with failure domain "osd" so a single-host vstart cluster
    # can satisfy placement.
    cmd = "{path}/ceph osd erasure-code-profile set {prof} crush-failure-domain=osd".format(prof=PROFNAME, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    cmd = "{path}/ceph osd erasure-code-profile get {prof}".format(prof=PROFNAME, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    cmd = "{path}/ceph osd pool create {pool} {pg} {pg} erasure {prof}".format(pool=EC_POOL, prof=PROFNAME, pg=PG_COUNT, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    ECID = get_pool_id(EC_POOL, nullfd)

    print("Created Erasure coded pool #{ecid}".format(ecid=ECID))

    print("Creating {objs} objects in replicated pool".format(objs=(NUM_REP_OBJECTS*NUM_NSPACES)))
    cmd = "mkdir -p {datadir}".format(datadir=DATADIR)
    logging.debug(cmd)
    call(cmd, shell=True)

    # db records what we wrote, per namespace/object, for later verification:
    # db[nspace][NAME] -> {"xattr": {...}, "omap": {...}, "omapheader": ...}
    db = {}
745
    # Populate the replicated pool: for each namespace, write NUM_REP_OBJECTS
    # objects from local data files, then attach xattrs and an omap header to
    # the first ATTR_OBJS objects, recording everything in db.
    objects = range(1, NUM_REP_OBJECTS + 1)
    nspaces = range(NUM_NSPACES)
    for n in nspaces:
        nspace = get_nspace(n)

        db[nspace] = {}

        for i in objects:
            NAME = REP_NAME + "{num}".format(num=i)
            LNAME = nspace + "-" + NAME
            # Local copy of the object data; "__head" matches on-disk naming.
            DDNAME = os.path.join(DATADIR, LNAME)
            DDNAME += "__head"

            cmd = "rm -f " + DDNAME
            logging.debug(cmd)
            call(cmd, shell=True)

            # Only the first object per namespace gets the large data set.
            if i == 1:
                dataline = range(DATALINECOUNT)
            else:
                dataline = range(1)
            fd = open(DDNAME, "w")
            data = "This is the replicated data for " + LNAME + "\n"
            for _ in dataline:
                fd.write(data)
            fd.close()

            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=nullfd)
            if ret != 0:
                # A failed put means the fixture is unusable; abort the run.
                logging.critical("Rados put command failed with {ret}".format(ret=ret))
                return 1

            db[nspace][NAME] = {}

            # Objects 1..ATTR_OBJS get i-1 xattrs each (k == 0 is skipped below).
            if i < ATTR_OBJS + 1:
                keys = range(i)
            else:
                keys = range(0)
            db[nspace][NAME]["xattr"] = {}
            for k in keys:
                if k == 0:
                    continue
                mykey = "key{i}-{k}".format(i=i, k=k)
                myval = "val{i}-{k}".format(i=i, k=k)
                cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True)
                if ret != 0:
                    logging.error("setxattr failed with {ret}".format(ret=ret))
                    ERRORS += 1
                db[nspace][NAME]["xattr"][mykey] = myval

            # Create omap header in all objects but REPobject1
            if i < ATTR_OBJS + 1 and i != 1:
                myhdr = "hdr{i}".format(i=i)
                cmd = "{path}/rados -p {pool} -N '{nspace}' setomapheader {name} {hdr}".format(pool=REP_POOL, name=NAME, hdr=myhdr, nspace=nspace, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True)
                if ret != 0:
                    logging.critical("setomapheader failed with {ret}".format(ret=ret))
                    ERRORS += 1
                db[nspace][NAME]["omapheader"] = myhdr
810
811             db[nspace][NAME]["omap"] = {}
812             for k in keys:
813                 if k == 0:
814                     continue
815                 mykey = "okey{i}-{k}".format(i=i, k=k)
816                 myval = "oval{i}-{k}".format(i=i, k=k)
817                 cmd = "{path}/rados -p {pool} -N '{nspace}' setomapval {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
818                 logging.debug(cmd)
819                 ret = call(cmd, shell=True)
820                 if ret != 0:
821                     logging.critical("setomapval failed with {ret}".format(ret=ret))
822                 db[nspace][NAME]["omap"][mykey] = myval
823
    # Create some clones
    # Take a pool snapshot, then overwrite the first NUM_CLONED_REP_OBJECTS
    # objects so each gets a clone; the pre-snapshot local data file is kept
    # as "<name>__1" to verify the clone's contents later.
    cmd = "{path}/rados -p {pool} mksnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True)

    objects = range(1, NUM_CLONED_REP_OBJECTS + 1)
    nspaces = range(NUM_NSPACES)
    for n in nspaces:
        nspace = get_nspace(n)

        for i in objects:
            NAME = REP_NAME + "{num}".format(num=i)
            LNAME = nspace + "-" + NAME
            DDNAME = os.path.join(DATADIR, LNAME)
            # First clone
            CLONENAME = DDNAME + "__1"
            DDNAME += "__head"

            # Preserve the pre-snapshot data as the expected clone contents.
            cmd = "mv -f " + DDNAME + " " + CLONENAME
            logging.debug(cmd)
            call(cmd, shell=True)

            if i == 1:
                dataline = range(DATALINECOUNT)
            else:
                dataline = range(1)
            fd = open(DDNAME, "w")
            data = "This is the replicated data after a snapshot for " + LNAME + "\n"
            for _ in dataline:
                fd.write(data)
            fd.close()

            # Overwriting after mksnap triggers clone creation on the OSDs.
            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=nullfd)
            if ret != 0:
                logging.critical("Rados put command failed with {ret}".format(ret=ret))
                return 1
862
863     print("Creating {objs} objects in erasure coded pool".format(objs=(NUM_EC_OBJECTS*NUM_NSPACES)))
864
865     objects = range(1, NUM_EC_OBJECTS + 1)
866     nspaces = range(NUM_NSPACES)
867     for n in nspaces:
868         nspace = get_nspace(n)
869
870         for i in objects:
871             NAME = EC_NAME + "{num}".format(num=i)
872             LNAME = nspace + "-" + NAME
873             DDNAME = os.path.join(DATADIR, LNAME)
874             DDNAME += "__head"
875
876             cmd = "rm -f " + DDNAME
877             logging.debug(cmd)
878             call(cmd, shell=True)
879
880             if i == 1:
881                 dataline = range(DATALINECOUNT)
882             else:
883                 dataline = range(1)
884             fd = open(DDNAME, "w")
885             data = "This is the erasure coded data for " + LNAME + "\n"
886             for j in dataline:
887                 fd.write(data)
888             fd.close()
889
890             cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=EC_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
891             logging.debug(cmd)
892             ret = call(cmd, shell=True, stderr=nullfd)
893             if ret != 0:
894                 logging.critical("Erasure coded pool creation failed with {ret}".format(ret=ret))
895                 return 1
896
897             db[nspace][NAME] = {}
898
899             db[nspace][NAME]["xattr"] = {}
900             if i < ATTR_OBJS + 1:
901                 keys = range(i)
902             else:
903                 keys = range(0)
904             for k in keys:
905                 if k == 0:
906                     continue
907                 mykey = "key{i}-{k}".format(i=i, k=k)
908                 myval = "val{i}-{k}".format(i=i, k=k)
909                 cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=EC_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
910                 logging.debug(cmd)
911                 ret = call(cmd, shell=True)
912                 if ret != 0:
913                     logging.error("setxattr failed with {ret}".format(ret=ret))
914                     ERRORS += 1
915                 db[nspace][NAME]["xattr"][mykey] = myval
916
917             # Omap isn't supported in EC pools
918             db[nspace][NAME]["omap"] = {}
919
    logging.debug(db)

    # Stop the cluster daemons: the objectstore tool must run against
    # offline OSD data stores.
    kill_daemons()

    # Abort if any of the setup steps above failed.
    if ERRORS:
        logging.critical("Unable to set up test")
        return 1

    # All PGs of each pool, then just the PGs that actually hold test objects.
    ALLREPPGS = get_pgs(OSDDIR, REPID)
    logging.debug(ALLREPPGS)
    ALLECPGS = get_pgs(OSDDIR, ECID)
    logging.debug(ALLECPGS)

    OBJREPPGS = get_objs(ALLREPPGS, REP_NAME, OSDDIR, REPID)
    logging.debug(OBJREPPGS)
    OBJECPGS = get_objs(ALLECPGS, EC_NAME, OSDDIR, ECID)
    logging.debug(OBJECPGS)

    # A representative PG/OSD pair used by the invalid-parameter tests below.
    ONEPG = ALLREPPGS[0]
    logging.debug(ONEPG)
    osds = get_osds(ONEPG, OSDDIR)
    ONEOSD = osds[0]
    logging.debug(ONEOSD)
943
    # Each test_failure() call runs the command and checks both that it fails
    # and that it prints the exact expected error message.
    print("Test invalid parameters")
    # On export can't use stdout to a terminal
    cmd = (CFSD_PREFIX + "--op export --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)

    # On export can't use stdout to a terminal
    cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)

    # Prep a valid ec export file for import failure tests
    ONEECPG = ALLECPGS[0]
    osds = get_osds(ONEECPG, OSDDIR)
    ONEECOSD = osds[0]
    OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
    cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=ONEECPG, file=OTHERFILE)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)

    # On import can't specify a different shard
    BADPG = ONEECPG.split('s')[0] + "s10"
    cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=BADPG, file=OTHERFILE)
    ERRORS += test_failure(cmd, "Can't specify a different shard, must be")

    os.unlink(OTHERFILE)

    # Prep a valid export file for import failure tests
    OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
    cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)

    # On import can't specify a PG with a non-existent pool
    cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg="10.0", file=OTHERFILE)
    ERRORS += test_failure(cmd, "Can't specify a different pgid pool, must be")

    # On import can't specify shard for a replicated export
    cmd = (CFSD_PREFIX + "--op import --pgid {pg}s0 --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
    ERRORS += test_failure(cmd, "Can't specify a sharded pgid with a non-sharded export")

    # On import can't specify a PG with a bad seed
    TMPPG="{pool}.80".format(pool=REPID)
    cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg=TMPPG, file=OTHERFILE)
    ERRORS += test_failure(cmd, "Illegal pgid, the seed is larger than current pg_num")

    # Import from a file that no longer exists
    os.unlink(OTHERFILE)
    cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE)
    ERRORS += test_failure(cmd, "file: {FOO}: No such file or directory".format(FOO=OTHERFILE))

    cmd = "{path}/ceph-objectstore-tool --data-path BAD_DATA_PATH --op list".format(osd=ONEOSD, path=CEPH_BIN)
    ERRORS += test_failure(cmd, "data-path: BAD_DATA_PATH: No such file or directory")

    cmd = "{path}/ceph-objectstore-tool --journal-path BAD_JOURNAL_PATH --op dump-journal".format(path=CEPH_BIN)
    ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: (2) No such file or directory")

    cmd = (CFSD_PREFIX + "--journal-path BAD_JOURNAL_PATH --op list").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: No such file or directory")

    cmd = (CFSD_PREFIX + "--journal-path /bin --op list").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "journal-path: /bin: (21) Is a directory")

    # On import can't use stdin from a terminal
    cmd = (CFSD_PREFIX + "--op import --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)

    # On import can't use stdin from a terminal
    cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)

    # Specify a bad --type
    os.mkdir(OSDDIR + "/fakeosd")
    cmd = ("{path}/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} --type foobar --op list --pgid {pg}").format(osd="fakeosd", pg=ONEPG, path=CEPH_BIN)
    ERRORS += test_failure(cmd, "Unable to create store of type foobar")

    # Don't specify a data-path
    cmd = "{path}/ceph-objectstore-tool --type memstore --op list --pgid {pg}".format(dir=OSDDIR, osd=ONEOSD, pg=ONEPG, path=CEPH_BIN)
    ERRORS += test_failure(cmd, "Must provide --data-path")

    cmd = (CFSD_PREFIX + "--op remove --pgid 2.0").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Please use export-remove or you must use --force option")

    cmd = (CFSD_PREFIX + "--force --op remove").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Must provide pgid")

    # Don't specify a --op nor object command
    cmd = CFSD_PREFIX.format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Must provide --op or object command...")

    # Specify a bad --op command
    cmd = (CFSD_PREFIX + "--op oops").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Must provide --op (info, log, remove, mkfs, fsck, export, export-remove, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete)")

    # Provide just the object param not a command
    cmd = (CFSD_PREFIX + "object").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Invalid syntax, missing command")

    # Provide an object name that doesn't exist
    cmd = (CFSD_PREFIX + "NON_OBJECT get-bytes").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "No object id 'NON_OBJECT' found")

    # Provide an invalid object command
    cmd = (CFSD_PREFIX + "--pgid {pg} '' notacommand").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Unknown object command 'notacommand'")

    cmd = (CFSD_PREFIX + "foo list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "No object id 'foo' found or invalid JSON specified")

    # Malformed JSON object specifications
    cmd = (CFSD_PREFIX + "'{{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}}' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Without --pgid the object '{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}' must be a JSON array")

    cmd = (CFSD_PREFIX + "'[]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[]' must be a JSON array with 2 elements")

    cmd = (CFSD_PREFIX + "'[\"1.0\"]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[\"1.0\"]' must be a JSON array with 2 elements")

    cmd = (CFSD_PREFIX + "'[\"1.0\", 5, 8, 9]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[\"1.0\", 5, 8, 9]' must be a JSON array with 2 elements")

    cmd = (CFSD_PREFIX + "'[1, 2]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[1, 2]' must be a JSON array with the first element a string")

    cmd = (CFSD_PREFIX + "'[\"1.3\",{{\"snapid\":\"not an int\"}}]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Decode object JSON error: value type is 2 not 4")
1067
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)
    ALLPGS = OBJREPPGS + OBJECPGS
    OSDS = get_osds(ALLPGS[0], OSDDIR)
    osd = OSDS[0]

    print("Test all --op dump-journal")
    ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
    ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)

    # Test --op list and generate json for all objects
    print("Test --op list variants")

    # retrieve all objects from all PGs
    tmpfd = open(TMPFILE, "wb")
    cmd = (CFSD_PREFIX + "--op list --format json").format(osd=osd)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=tmpfd)
    if ret != 0:
        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
        ERRORS += 1
    tmpfd.close()
    lines = get_lines(TMPFILE)
    # Sorting the de-duplicated output makes JSONOBJ[0] deterministic so it
    # can be compared against the --pgid variant below.
    JSONOBJ = sorted(set(lines))
    (pgid, coll, jsondict) = json.loads(JSONOBJ[0])[0]

    # retrieve all objects in a given PG
    # NOTE(review): opened in append mode, but OTHERFILE was unlinked above,
    # so this creates a fresh file — presumably "wb" was intended; confirm.
    tmpfd = open(OTHERFILE, "ab")
    cmd = (CFSD_PREFIX + "--op list --pgid {pg} --format json").format(osd=osd, pg=pgid)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=tmpfd)
    if ret != 0:
        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
        ERRORS += 1
    tmpfd.close()
    lines = get_lines(OTHERFILE)
    JSONOBJ = sorted(set(lines))
    (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0]

    # The PG-scoped listing must agree with the all-PGs listing.
    if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
        logging.error("the first line of --op list is different "
                      "from the first line of --op list --pgid {pg}".format(pg=pgid))
        ERRORS += 1
1110
1111     # retrieve all objects with a given name in a given PG
1112     tmpfd = open(OTHERFILE, "wb")
1113     cmd = (CFSD_PREFIX + "--op list --pgid {pg} {object} --format json").format(osd=osd, pg=pgid, object=jsondict['oid'])
1114     logging.debug(cmd)
1115     ret = call(cmd, shell=True, stdout=tmpfd)
1116     if ret != 0:
1117         logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1118         ERRORS += 1
1119     tmpfd.close()
1120     lines = get_lines(OTHERFILE)
1121     JSONOBJ = sorted(set(lines))
1122     (other_pgid, other_coll, other_jsondict) in json.loads(JSONOBJ[0])[0]
1123
1124     if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
1125         logging.error("the first line of --op list is different "
1126                       "from the first line of --op list --pgid {pg} {object}".format(pg=pgid, object=jsondict['oid']))
1127         ERRORS += 1
1128
    print("Test --op list by generating json for all objects using default format")
    for pg in ALLPGS:
        OSDS = get_osds(pg, OSDDIR)
        for osd in OSDS:
            # Append each OSD's listing; TMPFILE accumulates every PG's output.
            tmpfd = open(TMPFILE, "ab")
            cmd = (CFSD_PREFIX + "--op list --pgid {pg}").format(osd=osd, pg=pg)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stdout=tmpfd)
            if ret != 0:
                logging.error("Bad exit status {ret} from --op list request".format(ret=ret))
                ERRORS += 1

    # NOTE(review): only the fd from the last loop iteration is closed here;
    # the earlier fds are left to the garbage collector — confirm intended.
    tmpfd.close()
    lines = get_lines(TMPFILE)
    JSONOBJ = sorted(set(lines))
    for JSON in JSONOBJ:
        (pgid, jsondict) = json.loads(JSON)
        # Skip clones for now
        if jsondict['snapid'] != -2:
            continue
        # Record each object's JSON identifier for use by later tests.
        db[jsondict['namespace']][jsondict['oid']]['json'] = json.dumps((pgid, jsondict))
        # print db[jsondict['namespace']][jsondict['oid']]['json']
        # EC objects must carry a shard_id in their JSON identifier.
        if jsondict['oid'].find(EC_NAME) == 0 and 'shard_id' not in jsondict:
            logging.error("Malformed JSON {json}".format(json=JSON))
            ERRORS += 1
1154
    # Test get-bytes
    # For every replicated test object: get-bytes must match the local data
    # file, set-bytes must round-trip through get-bytes, --dry-run must not
    # modify the object, and the original contents are restored at the end.
    print("Test get-bytes and set-bytes")
    for nspace in db.keys():
        for basename in db[nspace].keys():
            file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
            JSON = db[nspace][basename]['json']
            GETNAME = "/tmp/getbytes.{pid}".format(pid=pid)
            TESTNAME = "/tmp/testbytes.{pid}".format(pid=pid)
            SETNAME = "/tmp/setbytes.{pid}".format(pid=pid)
            BADNAME = "/tmp/badbytes.{pid}".format(pid=pid)
            for pg in OBJREPPGS:
                OSDS = get_osds(pg, OSDDIR)
                for osd in OSDS:
                    DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
                    # Only test on OSDs that actually hold this object's file.
                    fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
                              and f.split("_")[0] == basename and f.split("_")[4] == nspace]
                    if not fnames:
                        continue
                    try:
                        os.unlink(GETNAME)
                    except:
                        pass
                    # get-bytes into a file; must match the data we wrote.
                    cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-bytes {fname}").format(osd=osd, pg=pg, json=JSON, fname=GETNAME)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Bad exit status {ret}".format(ret=ret))
                        ERRORS += 1
                        continue
                    cmd = "diff -q {file} {getfile}".format(file=file, getfile=GETNAME)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Data from get-bytes differ")
                        logging.debug("Got:")
                        cat_file(logging.DEBUG, GETNAME)
                        logging.debug("Expected:")
                        cat_file(logging.DEBUG, file)
                        ERRORS += 1
                    # set-bytes from a small scratch file ...
                    fd = open(SETNAME, "w")
                    data = "put-bytes going into {file}\n".format(file=file)
                    fd.write(data)
                    fd.close()
                    cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=SETNAME)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from set-bytes".format(ret=ret))
                        ERRORS += 1
                    # ... then read it back via get-bytes to stdout and compare.
                    fd = open(TESTNAME, "wb")
                    cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=fd)
                    fd.close()
                    if ret != 0:
                        logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
                        ERRORS += 1
                    cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Data after set-bytes differ")
                        logging.debug("Got:")
                        cat_file(logging.DEBUG, TESTNAME)
                        logging.debug("Expected:")
                        cat_file(logging.DEBUG, SETNAME)
                        ERRORS += 1

                    # Use set-bytes with --dry-run and make sure contents haven't changed
                    fd = open(BADNAME, "w")
                    data = "Bad data for --dry-run in {file}\n".format(file=file)
                    fd.write(data)
                    fd.close()
                    cmd = (CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=BADNAME)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from set-bytes --dry-run".format(ret=ret))
                        ERRORS += 1
                    fd = open(TESTNAME, "wb")
                    cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=fd)
                    fd.close()
                    if ret != 0:
                        logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
                        ERRORS += 1
                    # Object must still hold SETNAME's data, not BADNAME's.
                    cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Data after set-bytes --dry-run changed!")
                        logging.debug("Got:")
                        cat_file(logging.DEBUG, TESTNAME)
                        logging.debug("Expected:")
                        cat_file(logging.DEBUG, SETNAME)
                        ERRORS += 1

                    # Restore the original object contents from the data file
                    # (set-bytes reads stdin when no file argument is given).
                    fd = open(file, "rb")
                    cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes").format(osd=osd, pg=pg, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdin=fd)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from set-bytes to restore object".format(ret=ret))
                        ERRORS += 1
                    fd.close()
1260
1261     try:
1262         os.unlink(GETNAME)
1263     except:
1264         pass
1265     try:
1266         os.unlink(TESTNAME)
1267     except:
1268         pass
1269     try:
1270         os.unlink(SETNAME)
1271     except:
1272         pass
1273     try:
1274         os.unlink(BADNAME)
1275     except:
1276         pass
1277
    # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
    # For every replicated test object, verify each recorded xattr survives a
    # get/set/dry-run/rm cycle and is restored to its original value.
    print("Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap")
    for nspace in db.keys():
        for basename in db[nspace].keys():
            file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
            JSON = db[nspace][basename]['json']
            for pg in OBJREPPGS:
                OSDS = get_osds(pg, OSDDIR)
                for osd in OSDS:
                    DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
                    # Only test on OSDs that actually hold this object's file.
                    fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
                              and f.split("_")[0] == basename and f.split("_")[4] == nspace]
                    if not fnames:
                        continue
                    for key, val in db[nspace][basename]["xattr"].items():
                        # User xattrs are stored with a leading underscore.
                        attrkey = "_" + key
                        cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        getval = check_output(cmd, shell=True)
                        if getval != val:
                            logging.error("get-attr of key {key} returned wrong val: {get} instead of {orig}".format(key=attrkey, get=getval, orig=val))
                            ERRORS += 1
                            continue
                        # set-attr to bogus value "foobar"
                        cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
                            ERRORS += 1
                            continue
                        # Test set-attr with dry-run
                        cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stdout=nullfd)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
                            ERRORS += 1
                            continue
                        # Check the set-attr
                        cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        getval = check_output(cmd, shell=True)
                        # NOTE(review): check_output raises on failure, so
                        # "ret" here is stale (from the dry-run set-attr
                        # above); this check never reflects get-attr's exit
                        # status — confirm and fix separately.
                        if ret != 0:
                            logging.error("Bad exit status {ret} from get-attr".format(ret=ret))
                            ERRORS += 1
                            continue
                        if getval != "foobar":
                            logging.error("Check of set-attr failed because we got {val}".format(val=getval))
                            ERRORS += 1
                            continue
                        # Test rm-attr
                        cmd = (CFSD_PREFIX + "'{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
                            ERRORS += 1
                            continue
                        # Check rm-attr with dry-run
                        cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stdout=nullfd)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
                            ERRORS += 1
                            continue
                        # After rm-attr the key must be gone.
                        cmd = (CFSD_PREFIX + "'{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
                        if ret == 0:
                            logging.error("For rm-attr expect get-attr to fail, but it succeeded")
                            ERRORS += 1
                        # Put back value
                        cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey, val=val)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
                            ERRORS += 1
                            continue
1359
1360                     hdr = db[nspace][basename].get("omapheader", "")
1361                     cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, json=JSON)
1362                     logging.debug(cmd)
1363                     gethdr = check_output(cmd, shell=True)
1364                     if gethdr != hdr:
1365                         logging.error("get-omaphdr was wrong: {get} instead of {orig}".format(get=gethdr, orig=hdr))
1366                         ERRORS += 1
1367                         continue
1368                     # set-omaphdr to bogus value "foobar"
1369                     cmd = ("echo -n foobar | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1370                     logging.debug(cmd)
1371                     ret = call(cmd, shell=True)
1372                     if ret != 0:
1373                         logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1374                         ERRORS += 1
1375                         continue
1376                     # Check the set-omaphdr
1377                     cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, pg=pg, json=JSON)
1378                     logging.debug(cmd)
1379                     gethdr = check_output(cmd, shell=True)
1380                     if ret != 0:
1381                         logging.error("Bad exit status {ret} from get-omaphdr".format(ret=ret))
1382                         ERRORS += 1
1383                         continue
1384                     if gethdr != "foobar":
1385                         logging.error("Check of set-omaphdr failed because we got {val}".format(val=getval))
1386                         ERRORS += 1
1387                         continue
1388                     # Test dry-run with set-omaphdr
1389                     cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1390                     logging.debug(cmd)
1391                     ret = call(cmd, shell=True, stdout=nullfd)
1392                     if ret != 0:
1393                         logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1394                         ERRORS += 1
1395                         continue
1396                     # Put back value
1397                     cmd = ("echo -n {val} | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON, val=hdr)
1398                     logging.debug(cmd)
1399                     ret = call(cmd, shell=True)
1400                     if ret != 0:
1401                         logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1402                         ERRORS += 1
1403                         continue
1404
                    # Exercise get/set/rm-omap for every omap key recorded in
                    # db, mirroring the xattr tests above: read, clobber,
                    # dry-run, remove, then restore the original value.
                    for omapkey, val in db[nspace][basename]["omap"].items():
                        cmd = (CFSD_PREFIX + " '{json}' get-omap {key}").format(osd=osd, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        getval = check_output(cmd, shell=True)
                        if getval != val:
                            logging.error("get-omap of key {key} returned wrong val: {get} instead of {orig}".format(key=omapkey, get=getval, orig=val))
                            ERRORS += 1
                            continue
                        # set-omap to bogus value "foobar"
                        cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
                            ERRORS += 1
                            continue
                        # Check set-omap with dry-run
                        cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stdout=nullfd)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
                            ERRORS += 1
                            continue
                        # Check the set-omap
                        cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        getval = check_output(cmd, shell=True)
                        # NOTE(review): check_output() raises on failure, so
                        # 'ret' is still the earlier call's status; this check
                        # can never fire.
                        if ret != 0:
                            logging.error("Bad exit status {ret} from get-omap".format(ret=ret))
                            ERRORS += 1
                            continue
                        if getval != "foobar":
                            logging.error("Check of set-omap failed because we got {val}".format(val=getval))
                            ERRORS += 1
                            continue
                        # Test rm-omap
                        cmd = (CFSD_PREFIX + "'{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
                            ERRORS += 1
                        # Check rm-omap with dry-run
                        cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stdout=nullfd)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
                            ERRORS += 1
                        # After rm-omap the key must be gone, so get-omap must fail.
                        cmd = (CFSD_PREFIX + "'{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
                        if ret == 0:
                            logging.error("For rm-omap expect get-omap to fail, but it succeeded")
                            ERRORS += 1
                        # Put back value
                        cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey, val=val)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
                            ERRORS += 1
                            continue
1469
    # Test dump: for cloned replicated objects, the tool's 'dump' output
    # must include a snapshot entry ('"snap": 1,').
    print("Test dump")
    for nspace in db.keys():
        for basename in db[nspace].keys():
            file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
            JSON = db[nspace][basename]['json']
            GETNAME = "/tmp/getbytes.{pid}".format(pid=pid)
            for pg in OBJREPPGS:
                OSDS = get_osds(pg, OSDDIR)
                for osd in OSDS:
                    DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
                    # Skip OSDs that don't actually hold this object.
                    fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
                              and f.split("_")[0] == basename and f.split("_")[4] == nspace]
                    if not fnames:
                        continue
                    # Only the first NUM_CLONED_REP_OBJECTS objects were cloned.
                    if int(basename.split(REP_NAME)[1]) > int(NUM_CLONED_REP_OBJECTS):
                        continue
                    cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"snap\": 1,' > /dev/null").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Invalid dump for {json}".format(json=JSON))
                        ERRORS += 1
1493
    print("Test list-attrs get-attr")
    ATTRFILE = r"/tmp/attrs.{pid}".format(pid=pid)
    VALFILE = r"/tmp/val.{pid}".format(pid=pid)
    for nspace in db.keys():
        for basename in db[nspace].keys():
            file = os.path.join(DATADIR, nspace + "-" + basename)
            JSON = db[nspace][basename]['json']
            jsondict = json.loads(JSON)

            if 'shard_id' in jsondict:
                # EC object: probe every shard for hinfo_key and count how
                # many OSDs report it.
                logging.debug("ECobject " + JSON)
                found = 0
                for pg in OBJECPGS:
                    OSDS = get_osds(pg, OSDDIR)
                    # Fix shard_id since we only have one json instance for each object
                    jsondict['shard_id'] = int(pg.split('s')[1])
                    JSON = json.dumps(jsondict)
                    for osd in OSDS:
                        cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-attr hinfo_key").format(osd=osd, pg=pg, json=JSON)
                        logging.debug("TRY: " + cmd)
1514                         try:
1515                             out = check_output(cmd, shell=True, stderr=subprocess.STDOUT)
1516                             logging.debug("FOUND: {json} in {osd} has value '{val}'".format(osd=osd, json=JSON, val=out))
1517                             found += 1
1518                         except subprocess.CalledProcessError as e:
1519                             if "No such file or directory" not in e.output and "No data available" not in e.output:
1520                                 raise
                # Assuming k=2 m=1 for the default ec pool
                if found != 3:
                    logging.error("{json} hinfo_key found {found} times instead of 3".format(json=JSON, found=found))
                    ERRORS += 1

            # Cross-check list-attrs/get-attr output against the xattrs
            # recorded in db, for every PG that holds this object.
            for pg in ALLPGS:
                # Make sure rep obj with rep pg or ec obj with ec pg
                if ('shard_id' in jsondict) != (pg.find('s') > 0):
                    continue
                if 'shard_id' in jsondict:
                    # Fix shard_id since we only have one json instance for each object
                    jsondict['shard_id'] = int(pg.split('s')[1])
                    JSON = json.dumps(jsondict)
                OSDS = get_osds(pg, OSDDIR)
                for osd in OSDS:
                    DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
                    # Skip OSDs that don't actually hold this object.
                    fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
                              and f.split("_")[0] == basename and f.split("_")[4] == nspace]
                    if not fnames:
                        continue
                    afd = open(ATTRFILE, "wb")
                    cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' list-attrs").format(osd=osd, pg=pg, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=afd)
                    afd.close()
                    if ret != 0:
                        logging.error("list-attrs failed with {ret}".format(ret=ret))
                        ERRORS += 1
                        continue
                    keys = get_lines(ATTRFILE)
                    values = dict(db[nspace][basename]["xattr"])
                    for key in keys:
                        # Skip internal attrs; user xattrs carry a '_' prefix.
                        if key == "_" or key == "snapset" or key == "hinfo_key":
                            continue
                        key = key.strip("_")
                        if key not in values:
                            logging.error("Unexpected key {key} present".format(key=key))
                            ERRORS += 1
                            continue
                        exp = values.pop(key)
                        vfd = open(VALFILE, "wb")
                        cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key="_" + key)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stdout=vfd)
                        vfd.close()
                        if ret != 0:
                            logging.error("get-attr failed with {ret}".format(ret=ret))
                            ERRORS += 1
                            continue
                        lines = get_lines(VALFILE)
                        val = lines[0]
                        if exp != val:
                            logging.error("For key {key} got value {got} instead of {expected}".format(key=key, got=val, expected=exp))
                            ERRORS += 1
                    # Anything left in values was expected but never listed.
                    if len(values) != 0:
                        logging.error("Not all keys found, remaining keys:")
                        print(values)
1578
    print("Test --op meta-list")
    tmpfd = open(TMPFILE, "wb")
    cmd = (CFSD_PREFIX + "--op meta-list").format(osd=ONEOSD)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=tmpfd)
    if ret != 0:
        logging.error("Bad exit status {ret} from --op meta-list request".format(ret=ret))
        ERRORS += 1

    print("Test get-bytes on meta")
    tmpfd.close()
    lines = get_lines(TMPFILE)
    JSONOBJ = sorted(set(lines))
    for JSON in JSONOBJ:
        # Each meta-list line is a JSON pair: [pgid, object-descriptor].
        (pgid, jsondict) = json.loads(JSON)
        if pgid != "meta":
            logging.error("pgid incorrect for --op meta-list {pgid}".format(pgid=pgid))
            ERRORS += 1
        if jsondict['namespace'] != "":
            logging.error("namespace non null --op meta-list {ns}".format(ns=jsondict['namespace']))
            ERRORS += 1
        logging.info(JSON)
        # NOTE(review): GETNAME is a leftover loop variable from the dump
        # test above; it is only bound if db was non-empty.  The bare except
        # (here and below) is best-effort cleanup.
        try:
            os.unlink(GETNAME)
        except:
            pass
        cmd = (CFSD_PREFIX + "'{json}' get-bytes {fname}").format(osd=ONEOSD, json=JSON, fname=GETNAME)
        logging.debug(cmd)
        ret = call(cmd, shell=True)
        if ret != 0:
            logging.error("Bad exit status {ret}".format(ret=ret))
            ERRORS += 1

    # Best-effort removal of scratch files.
    try:
        os.unlink(GETNAME)
    except:
        pass
    try:
        os.unlink(TESTNAME)
    except:
        pass
1620
    print("Test pg info")
    for pg in ALLREPPGS + ALLECPGS:
        for osd in get_osds(pg, OSDDIR):
            # 'info' output must mention the pg's own id.
            cmd = (CFSD_PREFIX + "--op info --pgid {pg} | grep '\"pgid\": \"{pg}\"'").format(osd=osd, pg=pg)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stdout=nullfd)
            if ret != 0:
                logging.error("Getting info failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
                ERRORS += 1

    print("Test pg logging")
    if len(ALLREPPGS + ALLECPGS) == len(OBJREPPGS + OBJECPGS):
        logging.warning("All PGs have objects, so no log without modify entries")
    for pg in ALLREPPGS + ALLECPGS:
        for osd in get_osds(pg, OSDDIR):
            tmpfd = open(TMPFILE, "wb")
            cmd = (CFSD_PREFIX + "--op log --pgid {pg}").format(osd=osd, pg=pg)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stdout=tmpfd)
            if ret != 0:
                logging.error("Getting log failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
                ERRORS += 1
            # A pg's log should contain a "modify" entry iff the pg holds objects.
            HASOBJ = pg in OBJREPPGS + OBJECPGS
            MODOBJ = False
            for line in get_lines(TMPFILE):
                if line.find("modify") != -1:
                    MODOBJ = True
                    break
            if HASOBJ != MODOBJ:
                logging.error("Bad log for pg {pg} from {osd}".format(pg=pg, osd=osd))
                MSG = (HASOBJ and [""] or ["NOT "])[0]
                print("Log should {msg}have a modify entry".format(msg=MSG))
                ERRORS += 1

    try:
        os.unlink(TMPFILE)
    except:
        pass

    print("Test list-pgs")
    for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:

        # Compare the tool's list-pgs output with the pgs found on disk.
        CHECK_PGS = get_osd_pgs(os.path.join(OSDDIR, osd), None)
        CHECK_PGS = sorted(CHECK_PGS)

        cmd = (CFSD_PREFIX + "--op list-pgs").format(osd=osd)
        logging.debug(cmd)
        TEST_PGS = check_output(cmd, shell=True).split("\n")
        TEST_PGS = sorted(TEST_PGS)[1:]  # Skip extra blank line

        if TEST_PGS != CHECK_PGS:
            logging.error("list-pgs got wrong result for osd.{osd}".format(osd=osd))
            logging.error("Expected {pgs}".format(pgs=CHECK_PGS))
            logging.error("Got {pgs}".format(pgs=TEST_PGS))
            ERRORS += 1
1676
1677     EXP_ERRORS = 0
1678     print("Test pg export --dry-run")
1679     pg = ALLREPPGS[0]
1680     osd = get_osds(pg, OSDDIR)[0]
1681     fname = "/tmp/fname.{pid}".format(pid=pid)
1682     cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1683     logging.debug(cmd)
1684     ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1685     if ret != 0:
1686         logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1687         EXP_ERRORS += 1
1688     elif os.path.exists(fname):
1689         logging.error("Exporting --dry-run created file")
1690         EXP_ERRORS += 1
1691
1692     cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1693     logging.debug(cmd)
1694     ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1695     if ret != 0:
1696         logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1697         EXP_ERRORS += 1
1698     else:
1699         outdata = get_lines(fname)
1700         if len(outdata) > 0:
1701             logging.error("Exporting --dry-run to stdout not empty")
1702             logging.error("Data: " + outdata)
1703             EXP_ERRORS += 1
1704
    os.mkdir(TESTDIR)
    for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
        os.mkdir(os.path.join(TESTDIR, osd))
    print("Test pg export")
    for pg in ALLREPPGS + ALLECPGS:
        for osd in get_osds(pg, OSDDIR):
            mydir = os.path.join(TESTDIR, osd)
            fname = os.path.join(mydir, pg)
            # Exercise all three output styles: stdout redirect, --file -, --file.
            if pg == ALLREPPGS[0]:
                cmd = (CFSD_PREFIX + "--op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
            elif pg == ALLREPPGS[1]:
                cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file - > {file}").format(osd=osd, pg=pg, file=fname)
            else:
                cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
            if ret != 0:
                logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
                EXP_ERRORS += 1

    ERRORS += EXP_ERRORS

    print("Test pg removal")
    RM_ERRORS = 0
    for pg in ALLREPPGS + ALLECPGS:
        for osd in get_osds(pg, OSDDIR):
            # This should do nothing
            cmd = (CFSD_PREFIX + "--op remove --pgid {pg} --dry-run").format(pg=pg, osd=osd)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stdout=nullfd)
            if ret != 0:
                logging.error("Removing --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
                RM_ERRORS += 1
            cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stdout=nullfd)
            if ret != 0:
                logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
                RM_ERRORS += 1

    ERRORS += RM_ERRORS

    IMP_ERRORS = 0
    if EXP_ERRORS == 0 and RM_ERRORS == 0:
        print("Test pg import")
        for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
            dir = os.path.join(TESTDIR, osd)
            PGS = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
            for pg in PGS:
                file = os.path.join(dir, pg)
                # This should do nothing
                cmd = (CFSD_PREFIX + "--op import --file {file} --dry-run").format(osd=osd, file=file)
                logging.debug(cmd)
                ret = call(cmd, shell=True, stdout=nullfd)
                if ret != 0:
                    logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
                    IMP_ERRORS += 1
                # Exercise all three input styles: stdin pipe, --file - redirect, --file.
                if pg == PGS[0]:
                    cmd = ("cat {file} |".format(file=file) + CFSD_PREFIX + "--op import").format(osd=osd)
                elif pg == PGS[1]:
                    cmd = (CFSD_PREFIX + "--op import --file - --pgid {pg} < {file}").format(osd=osd, file=file, pg=pg)
                else:
                    cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file)
                logging.debug(cmd)
                ret = call(cmd, shell=True, stdout=nullfd)
                if ret != 0:
                    logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
                    IMP_ERRORS += 1
    else:
        logging.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")

    ERRORS += IMP_ERRORS
    # NOTE(review): this re-logs a stale 'cmd' from whichever loop ran last;
    # it looks vestigial.
    logging.debug(cmd)
1778
    if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
        print("Verify replicated import data")
        data_errors, _ = check_data(DATADIR, TMPFILE, OSDDIR, REP_NAME)
        ERRORS += data_errors
    else:
        logging.warning("SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES")

    print("Test all --op dump-journal again")
    ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
    ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)

    # Restart the cluster so the imported data can be verified via rados.
    vstart(new=False)
    wait_for_health()

    if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
        print("Verify erasure coded import data")
        ERRORS += verify(DATADIR, EC_POOL, EC_NAME, db)
        # Check replicated data/xattr/omap using rados
        print("Verify replicated import data using rados")
        ERRORS += verify(DATADIR, REP_POOL, REP_NAME, db)

    if EXP_ERRORS == 0:
        NEWPOOL = "rados-import-pool"
        cmd = "{path}/rados mkpool {pool}".format(pool=NEWPOOL, path=CEPH_BIN)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)

        print("Test rados import")
        first = True
        for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
            dir = os.path.join(TESTDIR, osd)
            for pg in [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]:
                # Only import exports that belong to the replicated pool.
                if pg.find("{id}.".format(id=REPID)) != 0:
                    continue
                file = os.path.join(dir, pg)
                if first:
                    first = False
                    # This should do nothing
                    cmd = "{path}/rados import -p {pool} --dry-run {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd)
                    if ret != 0:
                        logging.error("Rados import --dry-run failed from {file} with {ret}".format(file=file, ret=ret))
                        ERRORS += 1
                    # A dry-run import must leave the pool empty.
                    cmd = "{path}/rados -p {pool} ls".format(pool=NEWPOOL, path=CEPH_BIN)
                    logging.debug(cmd)
                    data = check_output(cmd, shell=True)
                    if data:
                        logging.error("'{data}'".format(data=data))
                        logging.error("Found objects after dry-run")
                        ERRORS += 1
                cmd = "{path}/rados import -p {pool} {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True, stdout=nullfd)
                if ret != 0:
                    logging.error("Rados import failed from {file} with {ret}".format(file=file, ret=ret))
                    ERRORS += 1
                # Importing again with --no-overwrite is expected to exit 0.
                cmd = "{path}/rados import -p {pool} --no-overwrite {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True, stdout=nullfd)
                if ret != 0:
                    logging.error("Rados import --no-overwrite failed from {file} with {ret}".format(file=file, ret=ret))
                    ERRORS += 1

        ERRORS += verify(DATADIR, NEWPOOL, REP_NAME, db)
    else:
        logging.warning("SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES")
1846
    # Clear directories of previous portion
    call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
    call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
    os.mkdir(TESTDIR)
    os.mkdir(DATADIR)

    # Cause SPLIT_POOL to split and test import with object/log filtering
    print("Testing import all objects after a split")
    SPLIT_POOL = "split_pool"
    PG_COUNT = 1
    SPLIT_OBJ_COUNT = 5
    SPLIT_NSPACE_COUNT = 2
    SPLIT_NAME = "split"
    cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    SPLITID = get_pool_id(SPLIT_POOL, nullfd)
    pool_size = int(check_output("{path}/ceph osd pool get {pool} size".format(pool=SPLIT_POOL, path=CEPH_BIN), shell=True, stderr=nullfd).split(" ")[1])
    EXP_ERRORS = 0
    RM_ERRORS = 0
    IMP_ERRORS = 0

    # Populate the pool: SPLIT_OBJ_COUNT objects in each namespace.
    objects = range(1, SPLIT_OBJ_COUNT + 1)
    nspaces = range(SPLIT_NSPACE_COUNT)
    for n in nspaces:
        nspace = get_nspace(n)

        for i in objects:
            NAME = SPLIT_NAME + "{num}".format(num=i)
            LNAME = nspace + "-" + NAME
            DDNAME = os.path.join(DATADIR, LNAME)
            DDNAME += "__head"

            cmd = "rm -f " + DDNAME
            logging.debug(cmd)
            call(cmd, shell=True)

            # First object gets DATALINECOUNT lines, the rest a single line.
            if i == 1:
                dataline = range(DATALINECOUNT)
            else:
                dataline = range(1)
            fd = open(DDNAME, "w")
            data = "This is the split data for " + LNAME + "\n"
            for _ in dataline:
                fd.write(data)
            fd.close()

            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=nullfd)
            if ret != 0:
                logging.critical("Rados put command failed with {ret}".format(ret=ret))
                return 1

    wait_for_health()
    kill_daemons()

    for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
        os.mkdir(os.path.join(TESTDIR, osd))

    pg = "{pool}.0".format(pool=SPLITID)
    EXPORT_PG = pg

    # Export the single pre-split pg from every OSD that holds it.
    export_osds = get_osds(pg, OSDDIR)
    for osd in export_osds:
        mydir = os.path.join(TESTDIR, osd)
        fname = os.path.join(mydir, pg)
        cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
        if ret != 0:
            logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
            EXP_ERRORS += 1

    ERRORS += EXP_ERRORS
1922
1923     if EXP_ERRORS == 0:
1924         vstart(new=False)
1925         wait_for_health()
1926
1927         cmd = "{path}/ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL, path=CEPH_BIN)
1928         logging.debug(cmd)
1929         ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1930         time.sleep(5)
1931         wait_for_health()
1932
1933         kill_daemons()
1934
1935         # Now 2 PGs, poolid.0 and poolid.1
1936         for seed in range(2):
1937             pg = "{pool}.{seed}".format(pool=SPLITID, seed=seed)
1938
1939             which = 0
1940             for osd in get_osds(pg, OSDDIR):
1941                 cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
1942                 logging.debug(cmd)
1943                 ret = call(cmd, shell=True, stdout=nullfd)
1944
1945                 # This is weird.  The export files are based on only the EXPORT_PG
1946                 # and where that pg was before the split.  Use 'which' to use all
1947                 # export copies in import.
1948                 mydir = os.path.join(TESTDIR, export_osds[which])
1949                 fname = os.path.join(mydir, EXPORT_PG)
1950                 which += 1
1951                 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1952                 logging.debug(cmd)
1953                 ret = call(cmd, shell=True, stdout=nullfd)
1954                 if ret != 0:
1955                     logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1956                     IMP_ERRORS += 1
1957
1958         ERRORS += IMP_ERRORS
1959
1960         # Start up again to make sure imports didn't corrupt anything
1961         if IMP_ERRORS == 0:
1962             print("Verify split import data")
1963             data_errors, count = check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME)
1964             ERRORS += data_errors
1965             if count != (SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size):
1966                 logging.error("Incorrect number of replicas seen {count}".format(count=count))
1967                 ERRORS += 1
1968             vstart(new=False)
1969             wait_for_health()
1970
1971     call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
1972     call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
1973
1974     ERRORS += test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS)
1975
1976     # vstart() starts 4 OSDs
1977     ERRORS += test_get_set_osdmap(CFSD_PREFIX, list(range(4)), ALLOSDS)
1978     ERRORS += test_get_set_inc_osdmap(CFSD_PREFIX, ALLOSDS[0])
1979
1980     kill_daemons()
1981     CORES = [f for f in os.listdir(CEPH_DIR) if f.startswith("core.")]
1982     if CORES:
1983         CORE_DIR = os.path.join("/tmp", "cores.{pid}".format(pid=os.getpid()))
1984         os.mkdir(CORE_DIR)
1985         call("/bin/mv {ceph_dir}/core.* {core_dir}".format(ceph_dir=CEPH_DIR, core_dir=CORE_DIR), shell=True)
1986         logging.error("Failure due to cores found")
1987         logging.error("See {core_dir} for cores".format(core_dir=CORE_DIR))
1988         ERRORS += len(CORES)
1989
1990     if ERRORS == 0:
1991         print("TEST PASSED")
1992         return 0
1993     else:
1994         print("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))
1995         return 1
1996
1997
def remove_btrfs_subvolumes(path):
    """Delete any btrfs subvolumes found under *path*.

    No-op on FreeBSD (no btrfs there).  If the filesystem containing
    *path* is not btrfs, nothing is done.  Deleting the subvolumes
    requires passwordless sudo for the btrfs commands.

    :param path: directory whose containing filesystem is inspected
    """
    if platform.system() == "FreeBSD":
        return
    result = subprocess.Popen("stat -f -c '%%T' %s" % path, shell=True, stdout=subprocess.PIPE)
    # Initialize so we don't hit a NameError below if stat produced no
    # output (e.g. the path has already been removed or stat failed).
    filesystem = ""
    for line in result.stdout:
        # Keep the last (typically only) line of stat output.
        filesystem = decode(line).rstrip('\n')
    result.wait()  # reap the child so we don't leave a zombie
    if filesystem == "btrfs":
        result = subprocess.Popen("sudo btrfs subvolume list %s" % path, shell=True, stdout=subprocess.PIPE)
        for line in result.stdout:
            # Column 9 of 'btrfs subvolume list' output is the subvolume path.
            subvolume = decode(line).split()[8]
            # extracting the relative volume name
            m = re.search(".*(%s.*)" % path, subvolume)
            if m:
                found = m.group(1)
                call("sudo btrfs subvolume delete %s" % found, shell=True)
        result.wait()  # reap this child as well
2013
2014
if __name__ == "__main__":
    # Default to failure so an exception escaping main() still exits non-zero.
    exit_code = 1
    try:
        exit_code = main(sys.argv[1:])
    finally:
        # Tear down the test cluster and scrub its directories even when
        # main() raised or was interrupted.
        kill_daemons()
        os.chdir(CEPH_BUILD_DIR)
        remove_btrfs_subvolumes(CEPH_DIR)
        call("/bin/rm -fr {dir}".format(dir=CEPH_DIR), shell=True)
    sys.exit(exit_code)