1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 * Ceph - scalable distributed file system
6 * Copyright (C) 2013 Inktank
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
15 #include <boost/program_options/variables_map.hpp>
16 #include <boost/program_options/parsers.hpp>
17 #include <boost/scoped_ptr.hpp>
18 #include <boost/optional.hpp>
22 #include "common/Formatter.h"
23 #include "common/errno.h"
24 #include "common/ceph_argparse.h"
26 #include "global/global_init.h"
28 #include "os/ObjectStore.h"
29 #include "os/filestore/FileJournal.h"
30 #include "os/filestore/FileStore.h"
32 #include "os/FuseStore.h"
35 #include "osd/PGLog.h"
39 #include "json_spirit/json_spirit_value.h"
40 #include "json_spirit/json_spirit_reader.h"
42 #include "rebuild_mondb.h"
43 #include "ceph_objectstore_tool.h"
44 #include "include/compat.h"
45 #include "include/util.h"
47 namespace po = boost::program_options;
// Build the CompatSet that this tool expects an OSD store to advertise:
// empty compat / ro-compat feature sets plus every incompat feature the
// tool knows how to handle.  Used when validating exports/imports.
51 CompatSet get_test_compat_set() {
52 CompatSet::FeatureSet ceph_osd_feature_compat;
53 CompatSet::FeatureSet ceph_osd_feature_ro_compat;
54 CompatSet::FeatureSet ceph_osd_feature_incompat;
55 ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_BASE);
56 ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_PGINFO);
57 ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_OLOC);
58 ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEC);
59 ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_CATEGORIES);
60 ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_HOBJECTPOOL);
61 ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_BIGINFO);
62 ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEVELDBINFO);
63 ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEVELDBLOG);
65 ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_SNAPMAPPER);
66 ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_SHARDS);
68 return CompatSet(ceph_osd_feature_compat, ceph_osd_feature_ro_compat,
69 ceph_osd_feature_incompat);
// Largest single read issued when streaming object data (1 MiB per chunk).
73 const ssize_t max_read = 1024 * 1024;
// Sentinel meaning "no file descriptor is open"; INT_MIN can never be a
// valid fd, so it is unambiguous.
74 const int fd_none = INT_MIN;
// Visitor interface used by the action_on_all_objects* helpers below:
// call() is invoked once per object found while scanning a collection.
78 struct action_on_object_t {
79 virtual ~action_on_object_t() {}
80 virtual int call(ObjectStore *store, coll_t coll, ghobject_t &ghobj, object_info_t &oi) = 0;
// Apply `action` to every object in one PG collection.  Objects are listed
// in batches (LIST_AT_A_TIME) until the max ghobject is reached; pgmeta
// objects are skipped.  For non-meta collections the OI_ATTR xattr is
// fetched (and presumably decoded into an object_info_t — the decode line
// is not visible in this view) before invoking action.call().
83 int _action_on_all_objects_in_pg(ObjectStore *store, coll_t coll, action_on_object_t &action, bool debug)
85 unsigned LIST_AT_A_TIME = 100;
87 while (!next.is_max()) {
88 vector<ghobject_t> list;
89 int r = store->collection_list(
92 ghobject_t::get_max(),
97 cerr << "Error listing collection: " << coll << ", "
98 << cpp_strerror(r) << std::endl;
101 for (vector<ghobject_t>::iterator obj = list.begin();
104 if (obj->is_pgmeta())
107 if (coll != coll_t::meta()) {
109 r = store->getattr(coll, *obj, OI_ATTR, attr);
111 cerr << "Error getting attr on : " << make_pair(coll, *obj) << ", "
112 << cpp_strerror(r) << std::endl;
115 bufferlist::iterator bp = attr.begin();
120 cerr << "Error getting attr on : " << make_pair(coll, *obj) << ", "
121 << cpp_strerror(r) << std::endl;
125 r = action.call(store, coll, *obj, oi);
// Apply `action` to every object in the PG named by `pgidstr`.  All
// collections are scanned (rather than opening the coll directly) so that
// an EC pool PG specified without a shard still matches all of its shard
// collections; an exact pgid match is also accepted.
133 int action_on_all_objects_in_pg(ObjectStore *store, string pgidstr, action_on_object_t &action, bool debug)
136 // Scan collections in case this is an ec pool but no shard specified
137 unsigned scanned = 0;
139 vector<coll_t> colls_to_check;
140 vector<coll_t> candidates;
141 r = store->list_collections(candidates);
143 cerr << "Error listing collections: " << cpp_strerror(r) << std::endl;
146 pgid.parse(pgidstr.c_str());
147 for (vector<coll_t>::iterator i = candidates.begin();
148 i != candidates.end();
151 if (!i->is_pg(&cand_pgid))
154 // If an exact match or treat no shard as any shard
155 if (cand_pgid == pgid ||
156 (pgid.is_no_shard() && pgid.pgid == cand_pgid.pgid)) {
157 colls_to_check.push_back(*i);
// Progress output (presumably gated on debug — the condition line is not
// visible here), then scan each matched collection in turn.
162 cerr << colls_to_check.size() << " pgs to scan" << std::endl;
163 for (vector<coll_t>::iterator i = colls_to_check.begin();
164 i != colls_to_check.end();
167 cerr << "Scanning " << *i << ", " << scanned << "/"
168 << colls_to_check.size() << " completed" << std::endl;
169 r = _action_on_all_objects_in_pg(store, *i, action, debug);
// Apply `action` to exactly one collection (no shard expansion), e.g. the
// meta collection.
176 int action_on_all_objects_in_exact_pg(ObjectStore *store, coll_t coll, action_on_object_t &action, bool debug)
178 int r = _action_on_all_objects_in_pg(store, coll, action, debug);
// Apply `action` to every object in every candidate collection in the
// store (presumably filtered to PG collections — the filter condition line
// is not visible in this view), reporting scan progress as it goes.
182 int _action_on_all_objects(ObjectStore *store, action_on_object_t &action, bool debug)
184 unsigned scanned = 0;
186 vector<coll_t> colls_to_check;
187 vector<coll_t> candidates;
188 r = store->list_collections(candidates);
190 cerr << "Error listing collections: " << cpp_strerror(r) << std::endl;
193 for (vector<coll_t>::iterator i = candidates.begin();
194 i != candidates.end();
197 colls_to_check.push_back(*i);
202 cerr << colls_to_check.size() << " pgs to scan" << std::endl;
203 for (vector<coll_t>::iterator i = colls_to_check.begin();
204 i != colls_to_check.end();
207 cerr << "Scanning " << *i << ", " << scanned << "/"
208 << colls_to_check.size() << " completed" << std::endl;
209 r = _action_on_all_objects_in_pg(store, *i, action, debug);
// Public wrapper over _action_on_all_objects (the post-processing between
// the call and the return is not visible in this view).
216 int action_on_all_objects(ObjectStore *store, action_on_object_t &action, bool debug)
218 int r = _action_on_all_objects(store, action, debug);
// Accumulates (collection, object) pairs found during a scan and can dump
// them via a Formatter.  `human_readable` toggles between a compact form
// and the full JSON form (pgid/coll fields only emitted in the latter or
// when the collection is not a PG).
222 struct pgid_object_list {
223 list<pair<coll_t, ghobject_t> > _objects;
225 void insert(coll_t coll, ghobject_t &ghobj) {
226 _objects.push_back(make_pair(coll, ghobj));
229 void dump(Formatter *f, bool human_readable) const {
231 f->open_array_section("pgid_objects");
232 for (list<pair<coll_t, ghobject_t> >::const_iterator i = _objects.begin();
235 f->open_array_section("pgid_object");
237 bool is_pg = i->first.is_pg(&pgid);
239 f->dump_string("pgid", stringify(pgid));
240 if (!is_pg || !human_readable)
241 f->dump_string("coll", i->first.to_str());
242 f->open_object_section("ghobject");
246 if (human_readable) {
251 if (!human_readable) {
// action_on_object_t that collects objects matching an (optional) name and
// (optional) namespace; when _need_snapset is set, only objects whose hobj
// carries a snapset are kept.  Results accumulate in a pgid_object_list.
259 struct lookup_ghobject : public action_on_object_t {
260 pgid_object_list _objects;
262 const boost::optional<std::string> _namespace;
265 lookup_ghobject(const string& name, const boost::optional<std::string>& nspace, bool need_snapset = false) : _name(name),
266 _namespace(nspace), _need_snapset(need_snapset) { }
// Filter: empty _name matches any object; unset _namespace matches any
// namespace.
268 int call(ObjectStore *store, coll_t coll, ghobject_t &ghobj, object_info_t &oi) override {
269 if (_need_snapset && !ghobj.hobj.has_snapset())
271 if ((_name.length() == 0 || ghobj.hobj.oid.name == _name) &&
272 (!_namespace || ghobj.hobj.nspace == _namespace))
273 _objects.insert(coll, ghobj);
278 return _objects._objects.size();
// Remove and return the first collected match.
281 pair<coll_t, ghobject_t> pop() {
282 pair<coll_t, ghobject_t> front = _objects._objects.front();
283 _objects._objects.pop_front();
287 void dump(Formatter *f, bool human_readable) const {
288 _objects.dump(f, human_readable);
// Well-known OSD metadata objects and the export/import file descriptor.
// file_fd is fd_none until an export/import file has been opened.
292 ghobject_t infos_oid = OSD::make_infos_oid();
294 ghobject_t biginfo_oid;
296 int file_fd = fd_none;
// Slurp the whole of `fd` into `bl`, reading at most max_read bytes per
// read_fd() call (the loop construct and `total` bookkeeping are partially
// elided in this view).
301 static int get_fd_data(int fd, bufferlist &bl)
305 ssize_t bytes = bl.read_fd(fd, max_read);
307 cerr << "read_fd error " << cpp_strerror(bytes) << std::endl;
317 assert(bl.length() == total);
// Read a PG's log and missing set.  For struct_ver >= 8 the log lives in
// the pgmeta object of the PG's own collection; older versions kept it in
// the meta collection under log_oid.  buffer decode errors are caught and
// reported rather than propagated.
321 int get_log(ObjectStore *fs, __u8 struct_ver,
322 coll_t coll, spg_t pgid, const pg_info_t &info,
323 PGLog::IndexedLog &log, pg_missing_t &missing)
327 assert(struct_ver > 0);
328 PGLog::read_log_and_missing(fs, coll,
329 struct_ver >= 8 ? coll : coll_t::meta(),
330 struct_ver >= 8 ? pgid.make_pgmeta_oid() : log_oid,
334 g_ceph_context->_conf->osd_ignore_stale_divergent_priors);
335 if (debug && oss.str().size())
336 cerr << oss.str() << std::endl;
338 catch (const buffer::error &e) {
339 cerr << "read_log_and_missing threw exception error " << e.what() << std::endl;
// Pretty-print a PG log and its missing set to `out` as a single
// "op_log" JSON object containing pg_log_t, pg_missing_t and map sections.
345 void dump_log(Formatter *formatter, ostream &out, pg_log_t &log,
346 pg_missing_t &missing)
348 formatter->open_object_section("op_log");
349 formatter->open_object_section("pg_log_t");
351 formatter->close_section();
352 formatter->flush(out);
353 formatter->open_object_section("pg_missing_t");
354 missing.dump(formatter);
355 formatter->close_section();
356 formatter->flush(out);
357 formatter->open_object_section("map");
358 formatter->close_section();
359 formatter->close_section();
360 formatter->flush(out);
363 //Based on part of OSD::load_pgs()
// Complete any pending PG removals: walk all collections and recursively
// remove those that are temp collections or PG collections flagged with
// the "_remove" marker (see mark_pg_for_removal below).
364 int finish_remove_pgs(ObjectStore *store)
367 int r = store->list_collections(ls);
369 cerr << "finish_remove_pgs: failed to list pgs: " << cpp_strerror(r)
374 for (vector<coll_t>::iterator it = ls.begin();
379 if (it->is_temp(&pgid) ||
380 (it->is_pg(&pgid) && PG::_has_removal_flag(store, pgid))) {
381 cout << "finish_remove_pgs " << *it << " removing " << pgid << std::endl;
382 OSD::recursive_remove_collection(g_ceph_context, store, pgid, *it);
386 //cout << "finish_remove_pgs ignoring unrecognized " << *it << std::endl;
391 #pragma GCC diagnostic ignored "-Wpragmas"
392 #pragma GCC diagnostic push
393 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
// Flag a PG for (later) removal by writing a "_remove" key into its pgmeta
// omap.  finish_remove_pgs() looks for this flag.  Requires on-disk
// struct_v >= 8 (pgmeta-based layout).
395 int mark_pg_for_removal(ObjectStore *fs, spg_t pgid, ObjectStore::Transaction *t)
397 pg_info_t info(pgid);
399 ghobject_t pgmeta_oid(info.pgid.make_pgmeta_oid());
402 epoch_t map_epoch = 0;
// A peek_map_epoch failure is only a warning here; removal proceeds.
403 int r = PG::peek_map_epoch(fs, pgid, &map_epoch, &bl);
405 cerr << __func__ << " warning: peek_map_epoch reported error" << std::endl;
406 PastIntervals past_intervals;
408 r = PG::read_info(fs, pgid, coll, bl, info, past_intervals, struct_v);
410 cerr << __func__ << " error on read_info " << cpp_strerror(r) << std::endl;
413 assert(struct_v >= 8);
415 cout << "setting '_remove' omap key" << std::endl;
416 map<string,bufferlist> values;
417 ::encode((char)1, values["_remove"]);
418 t->omap_setkeys(coll, pgmeta_oid, values);
422 #pragma GCC diagnostic pop
423 #pragma GCC diagnostic warning "-Wpragmas"
// Remove a PG: finish any previously-pending removals, verify the
// collection exists, mark it for removal, apply the transaction, and then
// run finish_remove_pgs() to actually delete it.
425 int initiate_new_remove_pg(ObjectStore *store, spg_t r_pgid,
426 ObjectStore::Sequencer &osr)
429 finish_remove_pgs(store);
430 if (!store->collection_exists(coll_t(r_pgid)))
433 cout << " marking collection for removal" << std::endl;
436 ObjectStore::Transaction rmt;
437 int r = mark_pg_for_removal(store, r_pgid, &rmt);
441 store->apply_transaction(&osr, std::move(rmt));
442 finish_remove_pgs(store);
// Serialize pg_info/past_intervals via PG::_prepare_write_info into
// key/value pairs and queue them as omap writes on the PG's pgmeta object.
446 int write_info(ObjectStore::Transaction &t, epoch_t epoch, pg_info_t &info,
447 PastIntervals &past_intervals)
450 coll_t coll(info.pgid);
451 ghobject_t pgmeta_oid(info.pgid.make_pgmeta_oid());
452 map<string,bufferlist> km;
// last_written_info is a fresh default; we always rewrite everything.
453 pg_info_t last_written_info;
454 int ret = PG::_prepare_write_info(
461 if (ret) cerr << "Failed to write info" << std::endl;
462 t.omap_setkeys(coll, pgmeta_oid, km);
// divergent_priors: legacy (pre-missing-set) representation of divergent
// log entries, keyed by version.
466 typedef map<eversion_t, hobject_t> divergent_priors_t;
// Persist a whole PG: info + past_intervals, then the log together with
// either the legacy divergent_priors (mutually exclusive with a missing
// set — asserted below) or the modern missing set.
468 int write_pg(ObjectStore::Transaction &t, epoch_t epoch, pg_info_t &info,
469 pg_log_t &log, PastIntervals &past_intervals,
470 divergent_priors_t &divergent,
471 pg_missing_t &missing)
473 int ret = write_info(t, epoch, info, past_intervals);
476 coll_t coll(info.pgid);
477 map<string,bufferlist> km;
479 if (!divergent.empty()) {
480 assert(missing.get_items().empty());
481 PGLog::write_log_and_missing_wo_missing(
482 t, &km, log, coll, info.pgid.make_pgmeta_oid(), divergent, true);
484 pg_missing_tracker_t tmissing(missing);
485 bool rebuilt_missing_set_with_deletes = missing.may_include_deletes;
486 PGLog::write_log_and_missing(
487 t, &km, log, coll, info.pgid.make_pgmeta_oid(), tmissing, true,
488 &rebuilt_missing_set_with_deletes);
490 t.omap_setkeys(coll, info.pgid.make_pgmeta_oid(), km);
// Number of omap keys pulled per batch when exporting an object's omap.
494 const int OMAP_BATCH_SIZE = 25;
// Drain up to OMAP_BATCH_SIZE key/value pairs from `iter` into `oset`.
495 void get_omap_batch(ObjectMap::ObjectMapIterator &iter, map<string, bufferlist> &oset)
498 for (int count = OMAP_BATCH_SIZE; count && iter->valid(); --count, iter->next()) {
499 oset.insert(pair<string, bufferlist>(iter->key(), iter->value()));
// Export one object to file_fd as a sequence of typed sections:
// OBJECT_BEGIN (with object_info_t from OI_ATTR), DATA chunks of at most
// max_read bytes, ATTRS, OMAP_HDR, OMAP batches, then OBJECT_END.
503 int ObjectStoreTool::export_file(ObjectStore *store, coll_t cid, ghobject_t &obj)
509 int ret = store->stat(cid, obj, &st);
513 cerr << "Read " << obj << std::endl;
517 cerr << "size=" << total << std::endl;
519 object_begin objb(obj);
// Embed the object_info_t so importers can restore snap metadata.
524 ret = store->getattr(cid, obj, OI_ATTR, bp);
526 cerr << "getattr failure object_info " << ret << std::endl;
532 cerr << "object_info: " << objb.oi << std::endl;
535 // NOTE: we include whiteouts, lost, etc.
537 ret = write_section(TYPE_OBJECT_BEGIN, objb, file_fd);
// Stream the data payload in max_read-sized chunks (loop control and
// offset advancement are partially elided in this view).
542 bufferlist rawdatabl;
545 mysize_t len = max_read;
549 ret = store->read(cid, obj, offset, len, rawdatabl);
555 data_section dblock(offset, len, rawdatabl);
557 cerr << "data section offset=" << offset << " len=" << len << std::endl;
562 ret = write_section(TYPE_DATA, dblock, file_fd);
566 //Handle attrs for this object
567 map<string,bufferptr> aset;
568 ret = store->getattrs(cid, obj, aset);
570 attr_section as(aset);
571 ret = write_section(TYPE_ATTRS, as, file_fd);
576 cerr << "attrs size " << aset.size() << std::endl;
579 //Handle omap information
581 ret = store->omap_get_header(cid, obj, &hdrbuf, true);
583 cerr << "omap_get_header: " << cpp_strerror(ret) << std::endl;
587 omap_hdr_section ohs(hdrbuf);
588 ret = write_section(TYPE_OMAP_HDR, ohs, file_fd);
592 ObjectMap::ObjectMapIterator iter = store->get_omap_iterator(cid, obj);
595 cerr << "omap_get_iterator: " << cpp_strerror(ret) << std::endl;
598 iter->seek_to_first();
600 map<string, bufferlist> out;
601 while(iter->valid()) {
602 get_omap_batch(iter, out);
604 if (out.empty()) break;
606 mapcount += out.size();
607 omap_section oms(out);
608 ret = write_section(TYPE_OMAP, oms, file_fd);
613 cerr << "omap map size " << mapcount << std::endl;
615 ret = write_simple(TYPE_OBJECT_END, file_fd);
// Export every ordinary object in a collection: list in batches of 300,
// skipping pgmeta and temp objects, and export_file() each of the rest.
622 int ObjectStoreTool::export_files(ObjectStore *store, coll_t coll)
626 while (!next.is_max()) {
627 vector<ghobject_t> objects;
628 int r = store->collection_list(coll, next, ghobject_t::get_max(), 300,
632 for (vector<ghobject_t>::iterator i = objects.begin();
635 assert(!i->hobj.is_meta());
636 if (i->is_pgmeta() || i->hobj.is_temp()) {
639 r = export_file(store, coll, *i);
// Replace the stored incremental osdmap for epoch `e` with `bl`.
// Epoch mismatches between `e` and the decoded incremental are fatal
// unless --force was given; likewise a missing existing inc-osdmap object
// (unless forcing, in which case a new epoch object is created).
647 int set_inc_osdmap(ObjectStore *store, epoch_t e, bufferlist& bl, bool force,
648 ObjectStore::Sequencer &osr) {
649 OSDMap::Incremental inc;
650 bufferlist::iterator it = bl.begin();
654 } else if (e != inc.epoch) {
655 cerr << "incremental.epoch mismatch: "
656 << inc.epoch << " != " << e << std::endl;
658 cerr << "But will continue anyway." << std::endl;
663 const ghobject_t inc_oid = OSD::get_inc_osdmap_pobject_name(e);
664 if (!store->exists(coll_t::meta(), inc_oid)) {
665 cerr << "inc-osdmap (" << inc_oid << ") does not exist." << std::endl;
669 cout << "Creating a new epoch." << std::endl;
// Write then truncate so a shorter replacement map leaves no stale tail.
673 ObjectStore::Transaction t;
674 t.write(coll_t::meta(), inc_oid, 0, bl.length(), bl);
675 t.truncate(coll_t::meta(), inc_oid, bl.length());
676 int ret = store->apply_transaction(&osr, std::move(t));
678 cerr << "Failed to set inc-osdmap (" << inc_oid << "): " << ret << std::endl;
680 cout << "Wrote inc-osdmap." << inc.epoch << std::endl;
// Read the stored incremental osdmap for epoch `e` from the meta
// collection into `bl`.
685 int get_inc_osdmap(ObjectStore *store, epoch_t e, bufferlist& bl)
687 if (store->read(coll_t::meta(),
688 OSD::get_inc_osdmap_pobject_name(e),
// Replace the stored full osdmap for epoch `e` with `bl`.  Mirrors
// set_inc_osdmap(): epoch mismatch and missing-object conditions are fatal
// unless --force was given.
695 int set_osdmap(ObjectStore *store, epoch_t e, bufferlist& bl, bool force,
696 ObjectStore::Sequencer &osr) {
// e == 0 presumably means "take the epoch from the decoded map" — the
// decode and the e==0 branch header are not visible in this view.
700 e = osdmap.get_epoch();
701 } else if (e != osdmap.get_epoch()) {
702 cerr << "osdmap.epoch mismatch: "
703 << e << " != " << osdmap.get_epoch() << std::endl;
705 cerr << "But will continue anyway." << std::endl;
710 const ghobject_t full_oid = OSD::get_osdmap_pobject_name(e);
711 if (!store->exists(coll_t::meta(), full_oid)) {
712 cerr << "osdmap (" << full_oid << ") does not exist." << std::endl;
716 cout << "Creating a new epoch." << std::endl;
// Write then truncate so a shorter replacement map leaves no stale tail.
720 ObjectStore::Transaction t;
721 t.write(coll_t::meta(), full_oid, 0, bl.length(), bl);
722 t.truncate(coll_t::meta(), full_oid, bl.length());
723 int ret = store->apply_transaction(&osr, std::move(t));
725 cerr << "Failed to set osdmap (" << full_oid << "): " << ret << std::endl;
727 cout << "Wrote osdmap." << osdmap.get_epoch() << std::endl;
// Load the full osdmap for epoch `e` from the meta collection and decode
// it into `osdmap`; raw bytes are also returned in `bl`.
732 int get_osdmap(ObjectStore *store, epoch_t e, OSDMap &osdmap, bufferlist& bl)
734 bool found = store->read(
735 coll_t::meta(), OSD::get_osdmap_pobject_name(e), 0, 0, bl) >= 0;
737 cerr << "Can't find OSDMap for pg epoch " << e << std::endl;
742 cerr << osdmap << std::endl;
// Populate a metadata_section's osdmap (and raw bytes) from the map stored
// for its map_epoch.
746 int add_osdmap(ObjectStore *store, metadata_section &ms)
748 return get_osdmap(store, ms.map_epoch, ms.osdmap, ms.osdmap_bl);
// Export a whole PG to file_fd: PG_BEGIN header (superblock included),
// PG_METADATA (osdmap, info, log, missing), every object via
// export_files(), then PG_END.
751 int ObjectStoreTool::do_export(ObjectStore *fs, coll_t coll, spg_t pgid,
752 pg_info_t &info, epoch_t map_epoch, __u8 struct_ver,
753 const OSDSuperblock& superblock,
754 PastIntervals &past_intervals)
756 PGLog::IndexedLog log;
757 pg_missing_t missing;
759 cerr << "Exporting " << pgid << std::endl;
761 int ret = get_log(fs, struct_ver, coll, pgid, info, log, missing);
766 Formatter *formatter = Formatter::create("json-pretty");
768 dump_log(formatter, cerr, log, missing);
773 pg_begin pgb(pgid, superblock);
774 // Special case: If replicated pg don't require the importing OSD to have shard feature
775 if (pgid.is_no_shard()) {
776 pgb.superblock.compat_features.incompat.remove(CEPH_OSD_FEATURE_INCOMPAT_SHARDS);
778 ret = write_section(TYPE_PG_BEGIN, pgb, file_fd);
782 // The metadata_section is now before files, so import can detect
783 // errors and abort without wasting time.
791 ret = add_osdmap(fs, ms);
794 ret = write_section(TYPE_PG_METADATA, ms, file_fd);
798 ret = export_files(fs, coll);
800 cerr << "export_files error " << ret << std::endl;
804 ret = write_simple(TYPE_PG_END, file_fd);
// Import helper: decode a data_section from `bl` and queue the payload as
// a write at its recorded offset/length.
811 int get_data(ObjectStore *store, coll_t coll, ghobject_t hoid,
812 ObjectStore::Transaction *t, bufferlist &bl)
814 bufferlist::iterator ebliter = bl.begin();
819 cerr << "\tdata: offset " << ds.offset << " len " << ds.len << std::endl;
820 t->write(coll, hoid, ds.offset, ds.len, ds.databl);
// Import helper (presumably get_attrs — the signature's first line is not
// visible in this view): decode an attr_section, queue setattrs, and
// rebuild SnapMapper state from OI_ATTR (legacy per-clone snaps) or
// SS_ATTR (clone_snaps on the head) as appropriate.
825 ObjectStore *store, coll_t coll, ghobject_t hoid,
826 ObjectStore::Transaction *t, bufferlist &bl,
827 OSDriver &driver, SnapMapper &snap_mapper)
829 bufferlist::iterator ebliter = bl.begin();
834 cerr << "\tattrs: len " << as.data.size() << std::endl;
835 t->setattrs(coll, hoid, as.data);
837 // This could have been handled in the caller if we didn't need to
838 // support exports that didn't include object_info_t in object_begin.
// Only non-rollback (NO_GEN) objects carry snap-mapper state.
839 if (hoid.generation == ghobject_t::NO_GEN) {
840 if (hoid.hobj.snap < CEPH_MAXSNAP) {
841 map<string,bufferlist>::iterator mi = as.data.find(OI_ATTR);
842 if (mi != as.data.end()) {
843 object_info_t oi(mi->second);
846 cerr << "object_info " << oi << std::endl;
848 OSDriver::OSTransaction _t(driver.get_transaction(t));
849 set<snapid_t> oi_snaps(oi.legacy_snaps.begin(), oi.legacy_snaps.end());
850 if (!oi_snaps.empty()) {
852 cerr << "\tsetting legacy snaps " << oi_snaps << std::endl;
853 snap_mapper.add_oid(hoid.hobj, oi_snaps, &_t);
857 if (hoid.hobj.is_head()) {
858 map<string,bufferlist>::iterator mi = as.data.find(SS_ATTR);
859 if (mi != as.data.end()) {
861 auto p = mi->second.begin();
863 cout << "snapset " << snapset << std::endl;
// Non-legacy snapsets record clone snaps on the head; map each clone.
864 if (!snapset.is_legacy()) {
865 for (auto& p : snapset.clone_snaps) {
866 ghobject_t clone = hoid;
867 clone.hobj.snap = p.first;
868 set<snapid_t> snaps(p.second.begin(), p.second.end());
869 if (!store->exists(coll, clone)) {
870 // no clone, skip. this is probably a cache pool. this works
871 // because we use a separate transaction per object and clones
872 // come before head in the archive.
874 cerr << "\tskipping missing " << clone << " (snaps "
875 << snaps << ")" << std::endl;
879 cerr << "\tsetting " << clone.hobj << " snaps " << snaps
881 OSDriver::OSTransaction _t(driver.get_transaction(t));
882 assert(!snaps.empty());
883 snap_mapper.add_oid(clone.hobj, snaps, &_t);
887 cerr << "missing SS_ATTR on " << hoid << std::endl;
// Import helper: decode an omap_hdr_section from `bl` and queue the omap
// header write.
896 int get_omap_hdr(ObjectStore *store, coll_t coll, ghobject_t hoid,
897 ObjectStore::Transaction *t, bufferlist &bl)
899 bufferlist::iterator ebliter = bl.begin();
904 cerr << "\tomap header: " << string(oh.hdr.c_str(), oh.hdr.length())
906 t->omap_setheader(coll, hoid, oh.hdr);
// Import helper: decode an omap_section from `bl` and queue the key/value
// writes.
910 int get_omap(ObjectStore *store, coll_t coll, ghobject_t hoid,
911 ObjectStore::Transaction *t, bufferlist &bl)
913 bufferlist::iterator ebliter = bl.begin();
918 cerr << "\tomap: size " << os.omap.size() << std::endl;
919 t->omap_setkeys(coll, hoid, os.omap);
// Import one object from the export stream: validate the OBJECT_BEGIN
// header (no temp objects; object must map to this PG/shard under
// `curmap`, otherwise it is skipped and *skipped_objects set), then replay
// its DATA / ATTRS / OMAP_HDR / OMAP sections into one transaction that is
// applied when OBJECT_END is reached.
923 int ObjectStoreTool::get_object(ObjectStore *store, coll_t coll,
924 bufferlist &bl, OSDMap &curmap,
925 bool *skipped_objects,
926 ObjectStore::Sequencer &osr)
928 ObjectStore::Transaction tran;
929 ObjectStore::Transaction *t = &tran;
930 bufferlist::iterator ebliter = bl.begin();
936 OSD::make_snapmapper_oid());
938 coll.is_pg_prefix(&pg);
939 SnapMapper mapper(g_ceph_context, &driver, 0, 0, 0, pg.shard);
941 if (ob.hoid.hobj.is_temp()) {
942 cerr << "ERROR: Export contains temporary object '" << ob.hoid << "'" << std::endl;
945 assert(g_ceph_context);
// Hit-set objects are placed by the OSD itself, so skip the placement
// check for them.
946 if (ob.hoid.hobj.nspace != g_ceph_context->_conf->osd_hit_set_namespace) {
947 object_t oid = ob.hoid.hobj.oid;
948 object_locator_t loc(ob.hoid.hobj);
949 pg_t raw_pgid = curmap.object_locator_to_pg(oid, loc);
950 pg_t pgid = curmap.raw_pg_to_pg(raw_pgid);
953 if (coll.is_pg(&coll_pgid) == false) {
954 cerr << "INTERNAL ERROR: Bad collection during import" << std::endl;
957 if (coll_pgid.shard != ob.hoid.shard_id) {
958 cerr << "INTERNAL ERROR: Importing shard " << coll_pgid.shard
959 << " but object shard is " << ob.hoid.shard_id << std::endl;
// Object maps to a different PG under the current map (e.g. after a
// split): skip it rather than import it into the wrong PG.
963 if (coll_pgid.pgid != pgid) {
964 cerr << "Skipping object '" << ob.hoid << "' which belongs in pg " << pgid << std::endl;
965 *skipped_objects = true;
972 t->touch(coll, ob.hoid);
974 cout << "Write " << ob.hoid << std::endl;
980 int ret = read_section(&type, &ebl);
984 //cout << "\tdo_object: Section type " << hex << type << dec << std::endl;
985 //cout << "\t\tsection size " << ebl.length() << std::endl;
986 if (type >= END_OF_TYPES) {
987 cout << "Skipping unknown object section type" << std::endl;
993 ret = get_data(store, coll, ob.hoid, t, ebl);
998 ret = get_attrs(store, coll, ob.hoid, t, ebl, driver, mapper);
1003 ret = get_omap_hdr(store, coll, ob.hoid, t, ebl);
1004 if (ret) return ret;
1008 ret = get_omap(store, coll, ob.hoid, t, ebl);
1009 if (ret) return ret;
1011 case TYPE_OBJECT_END:
1015 cerr << "Unknown section type " << type << std::endl;
1020 store->apply_transaction(&osr, std::move(*t));
// Import the PG_METADATA section: decode it, retarget the info to the
// destination pgid, validate epochs and pg_num against the current map,
// detect splits since the export, then clear past_intervals /
// same_interval_since and advance map_epoch so the OSD recomputes them.
1024 int get_pg_metadata(ObjectStore *store, bufferlist &bl, metadata_section &ms,
1025 const OSDSuperblock& sb, OSDMap& curmap, spg_t pgid)
1027 bufferlist::iterator ebliter = bl.begin();
1029 spg_t old_pgid = ms.info.pgid;
1030 ms.info.pgid = pgid;
// Debug dumps (presumably gated on a debug flag — the condition lines are
// not visible in this view).
1033 Formatter *formatter = new JSONFormatter(true);
1034 cout << "export pgid " << old_pgid << std::endl;
1035 cout << "struct_v " << (int)ms.struct_ver << std::endl;
1036 cout << "map epoch " << ms.map_epoch << std::endl;
1038 formatter->open_object_section("importing OSDMap");
1039 ms.osdmap.dump(formatter);
1040 formatter->close_section();
1041 formatter->flush(cout);
1044 cout << "osd current epoch " << sb.current_epoch << std::endl;
1045 formatter->open_object_section("current OSDMap");
1046 curmap.dump(formatter);
1047 formatter->close_section();
1048 formatter->flush(cout);
1051 formatter->open_object_section("info");
1052 ms.info.dump(formatter);
1053 formatter->close_section();
1054 formatter->flush(cout);
1057 formatter->open_object_section("log");
1058 ms.log.dump(formatter);
1059 formatter->close_section();
1060 formatter->flush(cout);
1063 formatter->flush(cout);
// Epoch 0 means the (old-format) export carried no OSDMap at all.
1067 if (ms.osdmap.get_epoch() != 0 && ms.map_epoch != ms.osdmap.get_epoch()) {
1068 cerr << "FATAL: Invalid OSDMap epoch in export data" << std::endl;
1072 if (ms.map_epoch > sb.current_epoch) {
1073 cerr << "ERROR: Export PG's map_epoch " << ms.map_epoch << " > OSD's epoch " << sb.current_epoch << std::endl;
1074 cerr << "The OSD you are using is older than the exported PG" << std::endl;
1075 cerr << "Either use another OSD or join selected OSD to cluster to update it first" << std::endl;
1079 // Pool verified to exist for call to get_pg_num().
1080 unsigned new_pg_num = curmap.get_pg_num(pgid.pgid.pool());
1082 if (pgid.pgid.ps() >= new_pg_num) {
1083 cerr << "Illegal pgid, the seed is larger than current pg_num" << std::endl;
1087 // Old exports didn't include OSDMap, see if we have a copy locally
1088 if (ms.osdmap.get_epoch() == 0) {
1090 bufferlist findmap_bl;
1091 int ret = get_osdmap(store, ms.map_epoch, findmap, findmap_bl);
1093 ms.osdmap.deepish_copy_from(findmap);
1095 cerr << "WARNING: No OSDMap in old export,"
1096 " some objects may be ignored due to a split" << std::endl;
1100 // Make sure old_pg_num is 0 in the unusual case that OSDMap not in export
1101 // nor can we find a local copy.
1102 unsigned old_pg_num = 0;
1103 if (ms.osdmap.get_epoch() != 0)
1104 old_pg_num = ms.osdmap.get_pg_num(pgid.pgid.pool());
1107 cerr << "old_pg_num " << old_pg_num << std::endl;
1108 cerr << "new_pg_num " << new_pg_num << std::endl;
1109 cerr << ms.osdmap << std::endl;
1110 cerr << curmap << std::endl;
1113 // If we have managed to have a good OSDMap we can do these checks
1115 if (old_pgid.pgid.ps() >= old_pg_num) {
1116 cerr << "FATAL: pgid invalid for original map epoch" << std::endl;
1119 if (pgid.pgid.ps() >= old_pg_num) {
1120 cout << "NOTICE: Post split pgid specified" << std::endl;
1123 if (parent.is_split(old_pg_num, new_pg_num, NULL)) {
1124 cerr << "WARNING: Split occurred, some objects may be ignored" << std::endl;
1130 cerr << "Import pgid " << ms.info.pgid << std::endl;
1131 cerr << "Clearing past_intervals " << ms.past_intervals << std::endl;
1132 cerr << "Zero same_interval_since " << ms.info.history.same_interval_since << std::endl;
1135 // Let osd recompute past_intervals and same_interval_since
1136 ms.past_intervals.clear();
1137 ms.info.history.same_interval_since = 0;
1140 cerr << "Changing pg epoch " << ms.map_epoch << " to " << sb.current_epoch << std::endl;
1142 ms.map_epoch = sb.current_epoch;
1147 // out: pg_log_t that only has entries that apply to import_pgid using curmap
1148 // reject: Entries rejected from "in" are in the reject.log. Other fields not set.
// Split legacy divergent_priors into those that still map to import_pgid
// under the current OSDMap (`out`) and those that do not (`reject`).
// Temp objects are always rejected; hit-set-namespace objects bypass the
// placement check (the branch for them is partially elided in this view).
1149 void filter_divergent_priors(spg_t import_pgid, const OSDMap &curmap,
1150 const string &hit_set_namespace, const divergent_priors_t &in,
1151 divergent_priors_t &out, divergent_priors_t &reject)
1156 for (divergent_priors_t::const_iterator i = in.begin();
1157 i != in.end(); ++i) {
1159 // Reject divergent priors for temporary objects
1160 if (i->second.is_temp()) {
1165 if (i->second.nspace != hit_set_namespace) {
1166 object_t oid = i->second.oid;
1167 object_locator_t loc(i->second);
1168 pg_t raw_pgid = curmap.object_locator_to_pg(oid, loc);
1169 pg_t pgid = curmap.raw_pg_to_pg(raw_pgid);
1171 if (import_pgid.pgid == pgid) {
// Import a previously-exported PG into this OSD's store.  Steps:
//  1. validate the export file's magic/version and PG_BEGIN header;
//  2. reconcile an optional user-supplied pgid (pool/shard must agree);
//  3. reject exports from a different cluster fsid or with incompatible
//     features (unless --force), or whose pool no longer exists;
//  4. create the PG collection, flagged "_remove" so a failed import is
//     cleaned up by finish_remove_pgs();
//  5. replay OBJECT_BEGIN / PG_METADATA sections;
//  6. filter the log, divergent priors and missing set against the current
//     OSDMap (split handling), persist via write_pg(), then clear the
//     removal flag.
// Positive returns are exit statuses used by callers/tests (10 = pool
// gone, 11 = incompatible features).
1182 int ObjectStoreTool::do_import(ObjectStore *store, OSDSuperblock& sb,
1183 bool force, std::string pgidstr,
1184 ObjectStore::Sequencer &osr)
1188 PGLog::IndexedLog log;
1189 bool skipped_objects = false;
1192 finish_remove_pgs(store);
1194 int ret = read_super();
1198 if (sh.magic != super_header::super_magic) {
1199 cerr << "Invalid magic number" << std::endl;
1203 if (sh.version > super_header::super_ver) {
1204 cerr << "Can't handle export format version=" << sh.version << std::endl;
1208 //First section must be TYPE_PG_BEGIN
1210 ret = read_section(&type, &ebl);
1213 if (type == TYPE_POOL_BEGIN) {
1214 cerr << "Pool exports cannot be imported into a PG" << std::endl;
1216 } else if (type != TYPE_PG_BEGIN) {
1217 cerr << "Invalid first section type " << type << std::endl;
1221 bufferlist::iterator ebliter = ebl.begin();
1223 pgb.decode(ebliter);
1224 spg_t pgid = pgb.pgid;
1225 spg_t orig_pgid = pgid;
// Optional user override of the destination pgid: pool must match, and a
// shard can only be inferred from (never conflict with) the export.
1227 if (pgidstr.length()) {
1230 bool ok = user_pgid.parse(pgidstr.c_str());
1231 // This succeeded in main() already
1233 if (pgid != user_pgid) {
1234 if (pgid.pool() != user_pgid.pool()) {
1235 cerr << "Can't specify a different pgid pool, must be " << pgid.pool() << std::endl;
1238 if (pgid.is_no_shard() && !user_pgid.is_no_shard()) {
1239 cerr << "Can't specify a sharded pgid with a non-sharded export" << std::endl;
1242 // Get shard from export information if not specified
1243 if (!pgid.is_no_shard() && user_pgid.is_no_shard()) {
1244 user_pgid.shard = pgid.shard;
1246 if (pgid.shard != user_pgid.shard) {
1247 cerr << "Can't specify a different shard, must be " << pgid.shard << std::endl;
1254 if (!pgb.superblock.cluster_fsid.is_zero()
1255 && pgb.superblock.cluster_fsid != sb.cluster_fsid) {
1256 cerr << "Export came from different cluster with fsid "
1257 << pgb.superblock.cluster_fsid << std::endl;
1262 cerr << "Exported features: " << pgb.superblock.compat_features << std::endl;
1265 // Special case: Old export has SHARDS incompat feature on replicated pg, remove it
1266 if (pgid.is_no_shard())
1267 pgb.superblock.compat_features.incompat.remove(CEPH_OSD_FEATURE_INCOMPAT_SHARDS);
1269 if (sb.compat_features.compare(pgb.superblock.compat_features) == -1) {
1270 CompatSet unsupported = sb.compat_features.unsupported(pgb.superblock.compat_features);
1272 cerr << "Export has incompatible features set " << unsupported << std::endl;
1274 // Let them import if they specify the --force option
1276 return 11; // Positive return means exit status
1279 // Don't import if pool no longer exists
1282 ret = get_osdmap(store, sb.current_epoch, curmap, bl);
1284 cerr << "Can't find local OSDMap" << std::endl;
1287 if (!curmap.have_pg_pool(pgid.pgid.m_pool)) {
1288 cerr << "Pool " << pgid.pgid.m_pool << " no longer exists" << std::endl;
1289 // Special exit code for this error, used by test code
1290 return 10; // Positive return means exit status
1293 ghobject_t pgmeta_oid = pgid.make_pgmeta_oid();
1294 log_oid = OSD::make_pg_log_oid(pgid);
1295 biginfo_oid = OSD::make_pg_biginfo_oid(pgid);
1297 //Check for PG already present.
1299 if (store->collection_exists(coll)) {
1300 cerr << "pgid " << pgid << " already exists" << std::endl;
1305 ObjectStore::Transaction t;
1306 PG::_create(t, pgid,
1307 pgid.get_split_bits(curmap.get_pg_pool(pgid.pool())->get_pg_num()));
1308 PG::_init(t, pgid, NULL);
1310 // mark this coll for removal until we're done
1311 map<string,bufferlist> values;
1312 ::encode((char)1, values["_remove"]);
1313 t.omap_setkeys(coll, pgid.make_pgmeta_oid(), values);
1315 store->apply_transaction(&osr, std::move(t));
1318 cout << "Importing pgid " << pgid;
1319 if (orig_pgid != pgid) {
1320 cout << " exported as " << orig_pgid;
// Section replay loop: objects and metadata in stream order until PG_END
// (loop/termination lines are partially elided in this view).
1325 bool found_metadata = false;
1326 metadata_section ms;
1328 ret = read_section(&type, &ebl);
1332 //cout << "do_import: Section type " << hex << type << dec << std::endl;
1333 if (type >= END_OF_TYPES) {
1334 cout << "Skipping unknown section type" << std::endl;
1338 case TYPE_OBJECT_BEGIN:
1339 ret = get_object(store, coll, ebl, curmap, &skipped_objects, osr);
1340 if (ret) return ret;
1342 case TYPE_PG_METADATA:
1343 ret = get_pg_metadata(store, ebl, ms, sb, curmap, pgid);
1344 if (ret) return ret;
1345 found_metadata = true;
1351 cerr << "Unknown section type " << type << std::endl;
1356 if (!found_metadata) {
1357 cerr << "Missing metadata section" << std::endl;
1361 ObjectStore::Transaction t;
// Drop log entries / priors / missing entries that no longer map to this
// PG under the current map (a split may have happened since the export).
1363 pg_log_t newlog, reject;
1364 pg_log_t::filter_log(pgid, curmap, g_ceph_context->_conf->osd_hit_set_namespace,
1365 ms.log, newlog, reject);
1367 for (list<pg_log_entry_t>::iterator i = newlog.log.begin();
1368 i != newlog.log.end(); ++i)
1369 cerr << "Keeping log entry " << *i << std::endl;
1370 for (list<pg_log_entry_t>::iterator i = reject.log.begin();
1371 i != reject.log.end(); ++i)
1372 cerr << "Skipping log entry " << *i << std::endl;
1375 divergent_priors_t newdp, rejectdp;
1376 filter_divergent_priors(pgid, curmap, g_ceph_context->_conf->osd_hit_set_namespace,
1377 ms.divergent_priors, newdp, rejectdp);
1378 ms.divergent_priors = newdp;
1380 for (divergent_priors_t::iterator i = newdp.begin();
1381 i != newdp.end(); ++i)
1382 cerr << "Keeping divergent_prior " << *i << std::endl;
1383 for (divergent_priors_t::iterator i = rejectdp.begin();
1384 i != rejectdp.end(); ++i)
1385 cerr << "Skipping divergent_prior " << *i << std::endl;
1388 ms.missing.filter_objects([&](const hobject_t &obj) {
1389 if (obj.nspace == g_ceph_context->_conf->osd_hit_set_namespace)
1391 assert(!obj.is_temp());
1392 object_t oid = obj.oid;
1393 object_locator_t loc(obj);
1394 pg_t raw_pgid = curmap.object_locator_to_pg(oid, loc);
1395 pg_t _pgid = curmap.raw_pg_to_pg(raw_pgid);
1397 return pgid.pgid != _pgid;
1402 pg_missing_t missing;
1403 Formatter *formatter = Formatter::create("json-pretty");
1404 dump_log(formatter, cerr, newlog, ms.missing);
1408 // Just like a split invalidate stats since the object count is changed
1409 if (skipped_objects)
1410 ms.info.stats.stats_invalid = true;
1418 ms.divergent_priors,
1420 if (ret) return ret;
1423 // done, clear removal flag
1425 cerr << "done, clearing removal flag" << std::endl;
1429 remove.insert("_remove");
1430 t.omap_rmkeys(coll, pgid.make_pgmeta_oid(), remove);
1431 store->apply_transaction(&osr, std::move(t));
// List objects matching the given filters, dumping each hit through
// 'formatter'.  'object' is a name filter ('' selects the pgmeta object),
// 'nspace' optionally pins a namespace, and 'head' asks the lookup to
// match head/snapdir variants by name.
1437 int do_list(ObjectStore *store, string pgidstr, string object, boost::optional<std::string> nspace,
1438 Formatter *formatter, bool debug, bool human_readable, bool head)
1441 lookup_ghobject lookup(object, nspace, head);
1442 if (pgidstr.length() > 0) {
// A pgid was supplied: restrict the scan to that single PG.
1443 r = action_on_all_objects_in_pg(store, pgidstr, lookup, debug);
// No pgid: scan every object in the whole store.
1445 r = action_on_all_objects(store, lookup, debug);
// Emit what the lookup collected; human_readable prints one object per
// line rather than an enclosing JSON array (see format handling in main()).
1449 lookup.dump(formatter, human_readable);
1450 formatter->flush(cout);
// Like do_list(), but scans only the OSD's metadata collection
// (coll_t::meta()).  Matches 'object' in any namespace.
1454 int do_meta(ObjectStore *store, string object, Formatter *formatter, bool debug, bool human_readable)
1457 boost::optional<std::string> nspace; // Not specified
1458 lookup_ghobject lookup(object, nspace);
1459 r = action_on_all_objects_in_exact_pg(store, coll_t::meta(), lookup, debug);
1462 lookup.dump(formatter, human_readable);
1463 formatter->flush(cout);
// Queue removal of a single ghobject on transaction 't': first drop its
// snap mapping via 'mapper' (a SnapMapper& parameter; that line is not
// visible in this excerpt), then queue the object removal itself.
1467 int remove_object(coll_t coll, ghobject_t &ghobj,
1469 MapCacher::Transaction<std::string, bufferlist> *_t,
1470 ObjectStore::Transaction *t)
1472 int r = mapper.remove_oid(ghobj.hobj, _t);
// -ENOENT is tolerated: the object may simply have no snap mapping.
1473 if (r < 0 && r != -ENOENT) {
1474 cerr << "remove_oid returned " << cpp_strerror(r) << std::endl;
1478 t->remove(coll, ghobj);
1482 int get_snapset(ObjectStore *store, coll_t coll, ghobject_t &ghobj, SnapSet &ss, bool silent);
// Remove 'ghobj' from 'coll'.  For a head/snapdir object that still has
// snapshots, refuse unless forced; with 'all' (removeall) every clone
// listed in the SnapSet is removed as well.
1484 int do_remove_object(ObjectStore *store, coll_t coll,
1485 ghobject_t &ghobj, bool all, bool force,
1486 ObjectStore::Sequencer &osr)
// Derive the spg_t so the SnapMapper can be keyed by shard.
1489 coll.is_pg_prefix(&pg);
1493 OSD::make_snapmapper_oid());
1494 SnapMapper mapper(g_ceph_context, &driver, 0, 0, 0, pg.shard);
// Verify the object actually exists before touching anything.
1497 int r = store->stat(coll, ghobj, &st);
1499 cerr << "remove: " << cpp_strerror(r) << std::endl;
1504 if (ghobj.hobj.has_snapset()) {
1505 r = get_snapset(store, coll, ghobj, ss, false);
1507 cerr << "Can't get snapset error " << cpp_strerror(r) << std::endl;
// Snapshots present and only plain "remove" requested: warn (when
// forced) or bail out and ask for removeall.
1510 if (!ss.snaps.empty() && !all) {
1512 cout << "WARNING: only removing "
1513 << (ghobj.hobj.is_head() ? "head" : "snapdir")
1514 << " with snapshots present" << std::endl;
1517 cerr << "Snapshots are present, use removeall to delete everything" << std::endl;
1523 ObjectStore::Transaction t;
1524 OSDriver::OSTransaction _t(driver.get_transaction(&t));
1526 cout << "remove " << ghobj << std::endl;
1529 r = remove_object(coll, ghobj, mapper, &_t, &t);
// removeall: also remove every clone recorded in the SnapSet.
1534 ghobject_t snapobj = ghobj;
1535 for (vector<snapid_t>::iterator i = ss.snaps.begin() ;
1536 i != ss.snaps.end() ; ++i) {
1537 snapobj.hobj.snap = *i;
1538 cout << "remove " << snapobj << std::endl;
1540 r = remove_object(coll, snapobj, mapper, &_t, &t);
1547 store->apply_transaction(&osr, std::move(t));
// Print the name of every xattr on 'ghobj', one per line.
1552 int do_list_attrs(ObjectStore *store, coll_t coll, ghobject_t &ghobj)
1554 map<string,bufferptr> aset;
1555 int r = store->getattrs(coll, ghobj, aset);
1557 cerr << "getattrs: " << cpp_strerror(r) << std::endl;
1561 for (map<string,bufferptr>::iterator i = aset.begin();i != aset.end(); ++i) {
1562 string key(i->first);
// cleanbin() presumably escapes non-printable bytes for terminal
// output -- defined elsewhere in this file; confirm there.
1564 key = cleanbin(key);
1565 cout << key << std::endl;
// Print the key of every omap entry on 'ghobj', one per line,
// fetching entries in batches through the omap iterator.
1570 int do_list_omap(ObjectStore *store, coll_t coll, ghobject_t &ghobj)
1572 ObjectMap::ObjectMapIterator iter = store->get_omap_iterator(coll, ghobj);
// A null iterator means the object (or its omap) could not be found.
1574 cerr << "omap_get_iterator: " << cpp_strerror(ENOENT) << std::endl;
1577 iter->seek_to_first();
1578 map<string, bufferlist> oset;
1579 while(iter->valid()) {
// Pull the next batch of entries into 'oset'.
1580 get_omap_batch(iter, oset);
1582 for (map<string,bufferlist>::iterator i = oset.begin();i != oset.end(); ++i) {
1583 string key(i->first);
1585 key = cleanbin(key);
1586 cout << key << std::endl;
// Stream the full data payload of 'ghobj' to file descriptor 'fd',
// reading from the store in chunks of at most max_read bytes.
1592 int do_get_bytes(ObjectStore *store, coll_t coll, ghobject_t &ghobj, int fd)
// Stat first to learn the total size to transfer.
1597 int ret = store->stat(coll, ghobj, &st);
1599 cerr << "get-bytes: " << cpp_strerror(ret) << std::endl;
1605 cerr << "size=" << total << std::endl;
1607 uint64_t offset = 0;
1608 bufferlist rawdatabl;
1611 mysize_t len = max_read;
1615 ret = store->read(coll, ghobj, offset, len, rawdatabl);
1622 cerr << "data section offset=" << offset << " len=" << len << std::endl;
// Forward exactly the number of bytes the store returned.
1627 ret = write(fd, rawdatabl.c_str(), ret);
// Replace the data payload of 'ghobj' with the contents of file
// descriptor 'fd': truncate to zero, then append chunk by chunk.
1637 int do_set_bytes(ObjectStore *store, coll_t coll,
1638 ghobject_t &ghobj, int fd,
1639 ObjectStore::Sequencer &osr)
1641 ObjectStore::Transaction tran;
1642 ObjectStore::Transaction *t = &tran;
1645 cerr << "Write " << ghobj << std::endl;
// Ensure the object exists, then discard any previous contents.
1648 t->touch(coll, ghobj);
1649 t->truncate(coll, ghobj, 0);
1652 uint64_t offset = 0;
1653 bufferlist rawdatabl;
// Read up to max_read bytes per iteration from the input fd.
1656 ssize_t bytes = rawdatabl.read_fd(fd, max_read);
1658 cerr << "read_fd error " << cpp_strerror(bytes) << std::endl;
1666 cerr << "\tdata: offset " << offset << " bytes " << bytes << std::endl;
1668 t->write(coll, ghobj, offset, bytes, rawdatabl);
1671 // XXX: Should we apply_transaction() every once in a while for very large files
1675 store->apply_transaction(&osr, std::move(*t));
// Print the value of xattr 'key' on 'ghobj' (escaped via cleanbin(),
// with a trailing newline appended).
1679 int do_get_attr(ObjectStore *store, coll_t coll, ghobject_t &ghobj, string key)
1683 int r = store->getattr(coll, ghobj, key.c_str(), bp);
1685 cerr << "getattr: " << cpp_strerror(r) << std::endl;
1689 string value(bp.c_str(), bp.length());
1691 value = cleanbin(value);
1692 value.push_back('\n');
// Set xattr 'key' on 'ghobj' to the entire contents read from 'fd'.
1699 int do_set_attr(ObjectStore *store, coll_t coll,
1700 ghobject_t &ghobj, string key, int fd,
1701 ObjectStore::Sequencer &osr)
1703 ObjectStore::Transaction tran;
1704 ObjectStore::Transaction *t = &tran;
1708 cerr << "Setattr " << ghobj << std::endl;
// Slurp the whole attribute value from the input fd.
1710 int ret = get_fd_data(fd, bl);
// touch() so the setattr succeeds even if the object is missing.
1717 t->touch(coll, ghobj);
1719 t->setattr(coll, ghobj, key, bl);
1721 store->apply_transaction(&osr, std::move(*t));
// Remove xattr 'key' from 'ghobj'.
1725 int do_rm_attr(ObjectStore *store, coll_t coll,
1726 ghobject_t &ghobj, string key,
1727 ObjectStore::Sequencer &osr)
1729 ObjectStore::Transaction tran;
1730 ObjectStore::Transaction *t = &tran;
1733 cerr << "Rmattr " << ghobj << std::endl;
1738 t->rmattr(coll, ghobj, key);
1740 store->apply_transaction(&osr, std::move(*t));
// Print the value of omap entry 'key' on 'ghobj' (escaped via
// cleanbin(), with a trailing newline appended).
1744 int do_get_omap(ObjectStore *store, coll_t coll, ghobject_t &ghobj, string key)
1747 map<string, bufferlist> out;
// Query for exactly one key; 'keys' is populated on an elided line.
1751 int r = store->omap_get_values(coll, ghobj, keys, &out);
1753 cerr << "omap_get_values: " << cpp_strerror(r) << std::endl;
// An empty result map means the key does not exist on this object.
1758 cerr << "Key not found" << std::endl;
1762 assert(out.size() == 1);
1764 bufferlist bl = out.begin()->second;
1765 string value(bl.c_str(), bl.length());
1767 value = cleanbin(value);
1768 value.push_back('\n');
// Set omap entry 'key' on 'ghobj' to the entire contents read from 'fd'.
1775 int do_set_omap(ObjectStore *store, coll_t coll,
1776 ghobject_t &ghobj, string key, int fd,
1777 ObjectStore::Sequencer &osr)
1779 ObjectStore::Transaction tran;
1780 ObjectStore::Transaction *t = &tran;
1781 map<string, bufferlist> attrset;
1785 cerr << "Set_omap " << ghobj << std::endl;
// Slurp the whole value from the input fd.
1787 int ret = get_fd_data(fd, valbl);
1791 attrset.insert(pair<string, bufferlist>(key, valbl));
// touch() so the omap write succeeds even if the object is missing.
1796 t->touch(coll, ghobj);
1798 t->omap_setkeys(coll, ghobj, attrset);
1800 store->apply_transaction(&osr, std::move(*t));
// Remove omap entry 'key' from 'ghobj'.
1804 int do_rm_omap(ObjectStore *store, coll_t coll,
1805 ghobject_t &ghobj, string key,
1806 ObjectStore::Sequencer &osr)
1808 ObjectStore::Transaction tran;
1809 ObjectStore::Transaction *t = &tran;
1815 cerr << "Rm_omap " << ghobj << std::endl;
// 'keys' (a set holding just 'key') is populated on an elided line.
1820 t->omap_rmkeys(coll, ghobj, keys);
1822 store->apply_transaction(&osr, std::move(*t));
// Print the omap header of 'ghobj' (escaped via cleanbin(), with a
// trailing newline appended).
1826 int do_get_omaphdr(ObjectStore *store, coll_t coll, ghobject_t &ghobj)
1830 int r = store->omap_get_header(coll, ghobj, &hdrbl, true);
1832 cerr << "omap_get_header: " << cpp_strerror(r) << std::endl;
1836 string header(hdrbl.c_str(), hdrbl.length());
1838 header = cleanbin(header);
1839 header.push_back('\n');
// Set the omap header of 'ghobj' to the entire contents read from 'fd'.
1846 int do_set_omaphdr(ObjectStore *store, coll_t coll,
1847 ghobject_t &ghobj, int fd,
1848 ObjectStore::Sequencer &osr)
1850 ObjectStore::Transaction tran;
1851 ObjectStore::Transaction *t = &tran;
1855 cerr << "Omap_setheader " << ghobj << std::endl;
// Slurp the whole header value from the input fd.
1857 int ret = get_fd_data(fd, hdrbl);
// touch() so the header write succeeds even if the object is missing.
1864 t->touch(coll, ghobj);
1866 t->omap_setheader(coll, ghobj, hdrbl);
1868 store->apply_transaction(&osr, std::move(*t));
// Functor for the action_on_object machinery (--op fix-lost): for each
// object whose object_info has FLAG_LOST set, clear the flag and write
// the re-encoded object_info back into the OI_ATTR xattr.
1872 struct do_fix_lost : public action_on_object_t {
1873 ObjectStore::Sequencer *osr;
1875 explicit do_fix_lost(ObjectStore::Sequencer *_osr) : osr(_osr) {}
1877 int call(ObjectStore *store, coll_t coll,
1878 ghobject_t &ghobj, object_info_t &oi) override {
// Only reached when the lost flag is set (check is on an elided line).
1880 cout << coll << "/" << ghobj << " is lost";
1886 oi.clear_flag(object_info_t::FLAG_LOST);
1888 ::encode(oi, bl, -1); /* fixme: using full features */
1889 ObjectStore::Transaction t;
1890 t.setattr(coll, ghobj, OI_ATTR, bl);
1891 int r = store->apply_transaction(osr, std::move(t));
1893 cerr << "Error getting fixing attr on : " << make_pair(coll, ghobj)
1895 << cpp_strerror(r) << std::endl;
// Read and decode the SnapSet of 'ghobj' from its SS_ATTR xattr into
// 'ss'.  With 'silent' the error messages are suppressed (callers that
// probe for head vs snapdir use this).
1903 int get_snapset(ObjectStore *store, coll_t coll, ghobject_t &ghobj, SnapSet &ss, bool silent = false)
1906 int r = store->getattr(coll, ghobj, SS_ATTR, attr);
1909 cerr << "Error getting snapset on : " << make_pair(coll, ghobj) << ", "
1910 << cpp_strerror(r) << std::endl;
1913 bufferlist::iterator bp = attr.begin();
// Decode failure path (the try/decode itself is on elided lines).
1918 cerr << "Error decoding snapset on : " << make_pair(coll, ghobj) << ", "
1919 << cpp_strerror(r) << std::endl;
// Dump everything known about 'ghobj' as a structured "obj" section:
// its id, decoded object_info_t, stat() results, and (for head/snapdir
// objects) the decoded SnapSet.  Errors are reported per section; the
// dump continues past individual failures.
1925 int print_obj_info(ObjectStore *store, coll_t coll, ghobject_t &ghobj, Formatter* formatter)
1928 formatter->open_object_section("obj");
1929 formatter->open_object_section("id");
1930 ghobj.dump(formatter);
1931 formatter->close_section();
1934 int gr = store->getattr(coll, ghobj, OI_ATTR, attr);
// NOTE(review): error text prints cpp_strerror(r); presumably r is
// assigned from gr on an elided line -- confirm against the full file.
1937 cerr << "Error getting attr on : " << make_pair(coll, ghobj) << ", "
1938 << cpp_strerror(r) << std::endl;
1941 bufferlist::iterator bp = attr.begin();
1944 formatter->open_object_section("info");
1946 formatter->close_section();
1949 cerr << "Error decoding attr on : " << make_pair(coll, ghobj) << ", "
1950 << cpp_strerror(r) << std::endl;
1954 int sr = store->stat(coll, ghobj, &st, true);
1957 cerr << "Error stat on : " << make_pair(coll, ghobj) << ", "
1958 << cpp_strerror(r) << std::endl;
1960 formatter->open_object_section("stat");
1961 formatter->dump_int("size", st.st_size);
1962 formatter->dump_int("blksize", st.st_blksize);
1963 formatter->dump_int("blocks", st.st_blocks);
1964 formatter->dump_int("nlink", st.st_nlink);
1965 formatter->close_section();
// Only head/snapdir objects carry a SnapSet.
1968 if (ghobj.hobj.has_snapset()) {
1970 int snr = get_snapset(store, coll, ghobj, ss);
1974 formatter->open_object_section("SnapSet");
1976 formatter->close_section();
1979 formatter->close_section();
1980 formatter->flush(cout);
// Force the recorded size of 'ghobj' to 'setsize': updates object_info,
// truncates the on-disk data, and for clones also fixes the clone_size
// entry in the head/snapdir SnapSet.  With 'corrupt' the truncate is
// skipped so the metadata deliberately disagrees with the data (used by
// tests to inject inconsistency).
1985 int set_size(ObjectStore *store, coll_t coll, ghobject_t &ghobj, uint64_t setsize, Formatter* formatter,
1986 ObjectStore::Sequencer &osr, bool corrupt)
1988 if (ghobj.hobj.is_snapdir()) {
1989 cerr << "Can't set the size of a snapdir" << std::endl;
// Load and decode the current object_info (OI_ATTR).
1993 int r = store->getattr(coll, ghobj, OI_ATTR, attr);
1995 cerr << "Error getting attr on : " << make_pair(coll, ghobj) << ", "
1996 << cpp_strerror(r) << std::endl;
2000 bufferlist::iterator bp = attr.begin();
2005 cerr << "Error getting attr on : " << make_pair(coll, ghobj) << ", "
2006 << cpp_strerror(r) << std::endl;
2010 r = store->stat(coll, ghobj, &st, true);
2012 cerr << "Error stat on : " << make_pair(coll, ghobj) << ", "
2013 << cpp_strerror(r) << std::endl;
2015 ghobject_t head(ghobj);
2017 bool found_head = true;
2018 map<snapid_t, uint64_t>::iterator csi;
2019 bool is_snap = ghobj.hobj.is_snap();
// For a clone, locate the SnapSet: try the head object first, then
// fall back to the snapdir.
2021 head.hobj = head.hobj.get_head();
2022 r = get_snapset(store, coll, head, ss, true);
2023 if (r < 0 && r != -ENOENT) {
2024 // Requested get_snapset() silent, so if not -ENOENT show error
2025 cerr << "Error getting snapset on : " << make_pair(coll, head) << ", "
2026 << cpp_strerror(r) << std::endl;
2030 head.hobj = head.hobj.get_snapdir();
2031 r = get_snapset(store, coll, head, ss);
2038 csi = ss.clone_size.find(ghobj.hobj.snap);
2039 if (csi == ss.clone_size.end()) {
2040 cerr << "SnapSet is missing clone_size for snap " << ghobj.hobj.snap << std::endl;
// Nothing to do when stat size, oi size and (for clones) clone_size
// already agree with the requested value.
2044 if ((uint64_t)st.st_size == setsize && oi.size == setsize
2045 && (!is_snap || csi->second == setsize)) {
2046 cout << "Size of object is already " << setsize << std::endl;
2049 cout << "Setting size to " << setsize << ", stat size " << st.st_size
2050 << ", obj info size " << oi.size;
2052 cout << ", " << (found_head ? "head" : "snapdir")
2053 << " clone_size " << csi->second;
2054 csi->second = setsize;
2060 ::encode(oi, attr, -1); /* fixme: using full features */
2061 ObjectStore::Transaction t;
2062 t.setattr(coll, ghobj, OI_ATTR, attr);
2063 // Only modify object info if we want to corrupt it
2065 t.truncate(coll, ghobj, setsize);
// Persist the adjusted SnapSet on the head/snapdir object.
2067 bufferlist snapattr;
2069 ::encode(ss, snapattr);
2070 t.setattr(coll, head, SS_ATTR, snapattr);
2072 r = store->apply_transaction(&osr, std::move(t));
2074 cerr << "Error writing object info: " << make_pair(coll, ghobj) << ", "
2075 << cpp_strerror(r) << std::endl;
// Deliberately damage or clean up the SnapSet of 'ghobj' according to
// 'arg' -- a test hook.  "" clears clones/clone_overlap/clone_size,
// "corrupt" clears everything, and the remaining keywords break one
// specific field each (see the inline comments below).
2082 int clear_snapset(ObjectStore *store, coll_t coll, ghobject_t &ghobj,
2083 string arg, ObjectStore::Sequencer &osr)
2086 int ret = get_snapset(store, coll, ghobj, ss);
2090 // Use "head" to set head_exists incorrectly
2091 if (arg == "corrupt" || arg == "head")
2092 ss.head_exists = !ghobj.hobj.is_head();
2093 else if (ss.head_exists != ghobj.hobj.is_head()) {
2094 cerr << "Correcting head_exists, set to "
2095 << (ghobj.hobj.is_head() ? "true" : "false") << std::endl;
2096 ss.head_exists = ghobj.hobj.is_head();
2098 // Use "corrupt" to clear entire SnapSet
2099 // Use "seq" to just corrupt SnapSet.seq
2100 if (arg == "corrupt" || arg == "seq")
2102 // Use "snaps" to just clear SnapSet.snaps
2103 if (arg == "corrupt" || arg == "snaps")
2105 // By default just clear clone, clone_overlap and clone_size
2106 if (arg == "corrupt")
2108 if (arg == "" || arg == "clones")
2110 if (arg == "" || arg == "clone_overlap")
2111 ss.clone_overlap.clear();
2112 if (arg == "" || arg == "clone_size")
2113 ss.clone_size.clear();
2114 // Break all clone sizes by adding 1
2115 if (arg == "size") {
2116 for (map<snapid_t, uint64_t>::iterator i = ss.clone_size.begin();
2117 i != ss.clone_size.end(); ++i)
// Write the modified SnapSet back to the SS_ATTR xattr.
2124 ObjectStore::Transaction t;
2125 t.setattr(coll, ghobj, SS_ATTR, bl);
2126 int r = store->apply_transaction(&osr, std::move(t));
2128 cerr << "Error setting snapset on : " << make_pair(coll, ghobj) << ", "
2129 << cpp_strerror(r) << std::endl;
// Overload used by remove_from<>: linear search for a clone id in the
// SnapSet's clones vector.
2136 vector<snapid_t>::iterator find(vector<snapid_t> &v, snapid_t clid)
2138 return std::find(v.begin(), v.end(), clid);
// Overload used by remove_from<>: keyed lookup in clone_overlap.
2141 map<snapid_t, interval_set<uint64_t> >::iterator
2142 find(map<snapid_t, interval_set<uint64_t> > &m, snapid_t clid)
2144 return m.find(clid);
// Overload used by remove_from<>: keyed lookup in clone_size.
2147 map<snapid_t, uint64_t>::iterator find(map<snapid_t, uint64_t> &m,
2150 return m.find(clid);
// Erase 'cloneid' from container 'mv' (dispatching through the find()
// overloads above).  A missing clone is an error named by 'name'
// unless 'force' downgrades it to an ignored warning.
2154 int remove_from(T &mv, string name, snapid_t cloneid, bool force)
2156 typename T::iterator i = find(mv, cloneid);
2157 if (i != mv.end()) {
2160 cerr << "Clone " << cloneid << " doesn't exist in " << name;
2162 cerr << " (ignored)" << std::endl;
// Remove the metadata for clone 'cloneid' from the SnapSet of 'ghobj':
// merge its overlap into the next older clone (mirroring trim_object()
// in the OSD), drop it from clones/clone_overlap/clone_size, and write
// the SnapSet back.  'force' tolerates partially-missing entries.
2171 int remove_clone(ObjectStore *store, coll_t coll, ghobject_t &ghobj, snapid_t cloneid, bool force,
2172 ObjectStore::Sequencer &osr)
2174 // XXX: Don't allow this if in a cache tier or former cache tier
2175 // bool allow_incomplete_clones() const {
2176 // return cache_mode != CACHEMODE_NONE || has_flag(FLAG_INCOMPLETE_CLONES);
2179 int ret = get_snapset(store, coll, ghobj, snapset);
2183 // Derived from trim_object()
// Locate 'cloneid' in the (ascending) clones vector.
2185 vector<snapid_t>::iterator p;
2186 for (p = snapset.clones.begin(); p != snapset.clones.end(); ++p)
2189 if (p == snapset.clones.end()) {
2190 cerr << "Clone " << cloneid << " not present";
2193 if (p != snapset.clones.begin()) {
2194 // not the oldest... merge overlap into next older clone
2195 vector<snapid_t>::iterator n = p - 1;
2196 hobject_t prev_coid = ghobj.hobj;
2197 prev_coid.snap = *n;
2198 //bool adjust_prev_bytes = is_present_clone(prev_coid);
2200 //if (adjust_prev_bytes)
2201 // ctx->delta_stats.num_bytes -= snapset.get_clone_bytes(*n);
2203 snapset.clone_overlap[*n].intersection_of(
2204 snapset.clone_overlap[*p]);
2206 //if (adjust_prev_bytes)
2207 // ctx->delta_stats.num_bytes += snapset.get_clone_bytes(*n);
// Drop the clone from each of the three SnapSet maps/vectors.
2210 ret = remove_from(snapset.clones, "clones", cloneid, force);
2211 if (ret) return ret;
2212 ret = remove_from(snapset.clone_overlap, "clone_overlap", cloneid, force);
2213 if (ret) return ret;
2214 ret = remove_from(snapset.clone_size, "clone_size", cloneid, force);
2215 if (ret) return ret;
// Persist the updated SnapSet.
2221 ::encode(snapset, bl);
2222 ObjectStore::Transaction t;
2223 t.setattr(coll, ghobj, SS_ATTR, bl);
2224 int r = store->apply_transaction(&osr, std::move(t));
2226 cerr << "Error setting snapset on : " << make_pair(coll, ghobj) << ", "
2227 << cpp_strerror(r) << std::endl;
2230 cout << "Removal of clone " << cloneid << " complete" << std::endl;
2231 cout << "Use pg repair after OSD restarted to correct stat information" << std::endl;
// --op dup: copy the entire contents of one object store into another
// (e.g. filestore -> bluestore migration).  Mounts both stores, requires
// matching fsids and an empty destination, then copies every collection
// object-by-object (attrs, data, omap header and keys), the keyring
// file, and the basic OSD metadata keys, finishing with "ready".
2235 int dup(string srcpath, ObjectStore *src, string dstpath, ObjectStore *dst)
2237 cout << "dup from " << src->get_type() << ": " << srcpath << "\n"
2238 << " to " << dst->get_type() << ": " << dstpath
2240 ObjectStore::Sequencer osr("dup");
2242 vector<coll_t> collections;
2247 cerr << "failed to mount src: " << cpp_strerror(r) << std::endl;
2252 cerr << "failed to mount dst: " << cpp_strerror(r) << std::endl;
// Refuse to mix data from two different OSD instances.
2256 if (src->get_fsid() != dst->get_fsid()) {
2257 cerr << "src fsid " << src->get_fsid() << " != dest " << dst->get_fsid()
2261 cout << "fsid " << src->get_fsid() << std::endl;
2263 // make sure dst is empty
2264 r = dst->list_collections(collections);
2266 cerr << "error listing collections on dst: " << cpp_strerror(r) << std::endl;
2269 if (!collections.empty()) {
2270 cerr << "destination store is not empty" << std::endl;
2274 r = src->list_collections(collections);
2276 cerr << "error listing collections on src: " << cpp_strerror(r) << std::endl;
2280 num = collections.size();
2281 cout << num << " collections" << std::endl;
2283 for (auto cid : collections) {
2284 cout << i++ << "/" << num << " " << cid << std::endl;
// Recreate the collection with the same split-bit count, when known.
2286 ObjectStore::Transaction t;
2287 int bits = src->collection_bits(cid);
// filestore's meta collection legitimately has no bit count.
2289 if (src->get_type() == "filestore" && cid.is_meta()) {
2292 cerr << "cannot get bit count for collection " << cid << ": "
2293 << cpp_strerror(bits) << std::endl;
2297 t.create_collection(cid, bits);
2298 dst->apply_transaction(&osr, std::move(t));
2303 uint64_t bytes = 0, keys = 0;
// Page through the collection's objects 1000 at a time.
2305 vector<ghobject_t> ls;
2306 r = src->collection_list(cid, pos, ghobject_t::get_max(), 1000, &ls, &pos);
2308 cerr << "collection_list on " << cid << " from " << pos << " got: "
2309 << cpp_strerror(r) << std::endl;
2316 for (auto& oid : ls) {
2317 //cout << " " << cid << " " << oid << std::endl;
// Progress line, rewritten in place via '\r'.
2319 cout << " " << std::setw(16) << n << " objects, "
2320 << std::setw(16) << bytes << " bytes, "
2321 << std::setw(16) << keys << " keys"
2322 << std::setw(1) << "\r" << std::flush;
2326 ObjectStore::Transaction t;
// Copy xattrs, then the full data payload in a single read/write.
2329 map<string,bufferptr> attrs;
2330 src->getattrs(cid, oid, attrs);
2331 if (!attrs.empty()) {
2332 t.setattrs(cid, oid, attrs);
2336 src->read(cid, oid, 0, 0, bl);
2338 t.write(cid, oid, 0, bl.length(), bl);
2339 bytes += bl.length();
// Copy the omap header and all omap keys.
2343 map<string,bufferlist> omap;
2344 src->omap_get(cid, oid, &header, &omap);
2345 if (header.length()) {
2346 t.omap_setheader(cid, oid, header);
2349 if (!omap.empty()) {
2350 keys += omap.size();
2351 t.omap_setkeys(cid, oid, omap);
2354 dst->apply_transaction(&osr, std::move(t));
2357 cout << " " << std::setw(16) << n << " objects, "
2358 << std::setw(16) << bytes << " bytes, "
2359 << std::setw(16) << keys << " keys"
2360 << std::setw(1) << std::endl;
// Copy the keyring file verbatim (0600 -- it holds the OSD's secret).
2364 cout << "keyring" << std::endl;
2367 string s = srcpath + "/keyring";
2369 r = bl.read_file(s.c_str(), &err);
2371 cerr << "failed to copy " << s << ": " << err << std::endl;
2373 string d = dstpath + "/keyring";
2374 bl.write_file(d.c_str(), 0600);
// Copy the identifying metadata keys, then mark the new store usable.
2379 cout << "duping osd metadata" << std::endl;
2381 for (auto k : {"magic", "whoami", "ceph_fsid", "fsid"}) {
2383 src->read_meta(k, &val);
2384 dst->write_meta(k, val);
2388 dst->write_meta("ready", "ready");
2390 cout << "done." << std::endl;
// Print the boost::program_options summary followed by the positional
// (per-object) command syntax that program_options cannot describe.
2399 void usage(po::options_description &desc)
2402 cerr << desc << std::endl;
2404 cerr << "Positional syntax:" << std::endl;
2406 cerr << "ceph-objectstore-tool ... <object> (get|set)-bytes [file]" << std::endl;
2407 cerr << "ceph-objectstore-tool ... <object> set-(attr|omap) <key> [file]" << std::endl;
2408 cerr << "ceph-objectstore-tool ... <object> (get|rm)-(attr|omap) <key>" << std::endl;
2409 cerr << "ceph-objectstore-tool ... <object> get-omaphdr" << std::endl;
2410 cerr << "ceph-objectstore-tool ... <object> set-omaphdr [file]" << std::endl;
2411 cerr << "ceph-objectstore-tool ... <object> list-attrs" << std::endl;
2412 cerr << "ceph-objectstore-tool ... <object> list-omap" << std::endl;
2413 cerr << "ceph-objectstore-tool ... <object> remove|removeall" << std::endl;
2414 cerr << "ceph-objectstore-tool ... <object> dump" << std::endl;
2415 cerr << "ceph-objectstore-tool ... <object> set-size" << std::endl;
2416 cerr << "ceph-objectstore-tool ... <object> remove-clone-metadata <cloneid>" << std::endl;
2418 cerr << "<object> can be a JSON object description as displayed" << std::endl;
2419 cerr << "by --op list." << std::endl;
2420 cerr << "<object> can be an object name which will be looked up in all" << std::endl;
2421 cerr << "the OSD's PGs." << std::endl;
2422 cerr << "<object> can be the empty string ('') which with a provided pgid " << std::endl;
2423 cerr << "specifies the pgmeta object" << std::endl;
2425 cerr << "The optional [file] argument will read stdin or write stdout" << std::endl;
2426 cerr << "if not specified or if '-' specified." << std::endl;
// Return true when 'check' ends with the suffix 'ending'.
// An empty 'ending' is considered a suffix of every string.
bool ends_with(const string& check, const string& ending)
{
  if (check.size() < ending.size())
    return false;
  return check.compare(check.size() - ending.size(), ending.size(), ending) == 0;
}
2434 // Based on FileStore::dump_journal(), set-up enough to only dump
// Dump the filestore journal at 'journalpath' into formatter 'f'
// without mounting the store; 'm_journal_dio' selects direct I/O.
2435 int mydump_journal(Formatter *f, string journalpath, bool m_journal_dio)
// No journal path means there is nothing to dump.
2439 if (!journalpath.length())
2442 FileJournal *journal = new FileJournal(g_ceph_context, uuid_d(), NULL, NULL,
2443 journalpath.c_str(), m_journal_dio);
2444 r = journal->_fdump(*f, false);
// --op apply-layout-settings: re-apply filestore directory layout
// (split/merge) settings to every PG collection selected by either
// 'pool_name' or 'pgid'.  A no-op (success) for non-filestore backends.
2449 int apply_layout_settings(ObjectStore *os, const OSDSuperblock &superblock,
2450 const string &pool_name, const spg_t &pgid, bool dry_run)
2454 FileStore *fs = dynamic_cast<FileStore*>(os);
2456 cerr << "Nothing to do for non-filestore backend" << std::endl;
2457 return 0; // making this return success makes testing easier
// Need the current OSDMap to translate the pool name to an id.
2462 r = get_osdmap(os, superblock.current_epoch, curmap, bl);
2464 cerr << "Can't find local OSDMap: " << cpp_strerror(r) << std::endl;
2468 int64_t poolid = -1;
2469 if (pool_name.length()) {
2470 poolid = curmap.lookup_pg_pool_name(pool_name);
2472 cerr << "Couldn't find pool " << pool_name << ": " << cpp_strerror(poolid)
// Select PG collections belonging to the pool, or the one exact pgid.
2478 vector<coll_t> collections, filtered_colls;
2479 r = os->list_collections(collections);
2481 cerr << "Error listing collections: " << cpp_strerror(r) << std::endl;
2485 for (auto const &coll : collections) {
2487 if (coll.is_pg(&coll_pgid) &&
2488 ((poolid >= 0 && coll_pgid.pool() == (uint64_t)poolid) ||
2489 coll_pgid == pgid)) {
2490 filtered_colls.push_back(coll);
2494 size_t done = 0, total = filtered_colls.size();
2495 for (auto const &coll : filtered_colls) {
2497 cerr << "Would apply layout settings to " << coll << std::endl;
// Progress line, rewritten in place via '\r'.
2499 cerr << "Finished " << done << "/" << total << " collections" << "\r";
2500 r = fs->apply_layout_settings(coll);
2502 cerr << "Error applying layout settings to " << coll << std::endl;
2509 cerr << "Finished " << total << "/" << total << " collections" << "\r" << std::endl;
2513 int main(int argc, char **argv)
2515 string dpath, jpath, pgidstr, op, file, mountpoint, mon_store_path, object;
2516 string target_data_path, fsid;
2517 string objcmd, arg1, arg2, type, format, argnspace, pool;
2518 boost::optional<std::string> nspace;
2522 bool human_readable;
2524 Formatter *formatter;
2527 po::options_description desc("Allowed options");
2529 ("help", "produce help message")
2530 ("type", po::value<string>(&type),
2531 "Arg is one of [bluestore, filestore (default), memstore]")
2532 ("data-path", po::value<string>(&dpath),
2533 "path to object store, mandatory")
2534 ("journal-path", po::value<string>(&jpath),
2535 "path to journal, use if tool can't find it")
2536 ("pgid", po::value<string>(&pgidstr),
2537 "PG id, mandatory for info, log, remove, export, export-remove, rm-past-intervals, mark-complete, and mandatory for apply-layout-settings if --pool is not specified")
2538 ("pool", po::value<string>(&pool),
2539 "Pool name, mandatory for apply-layout-settings if --pgid is not specified")
2540 ("op", po::value<string>(&op),
2541 "Arg is one of [info, log, remove, mkfs, fsck, repair, fuse, dup, export, export-remove, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, "
2542 "get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, apply-layout-settings, update-mon-db]")
2543 ("epoch", po::value<unsigned>(&epoch),
2544 "epoch# for get-osdmap and get-inc-osdmap, the current epoch in use if not specified")
2545 ("file", po::value<string>(&file),
2546 "path of file to export, export-remove, import, get-osdmap, set-osdmap, get-inc-osdmap or set-inc-osdmap")
2547 ("mon-store-path", po::value<string>(&mon_store_path),
2548 "path of monstore to update-mon-db")
2549 ("fsid", po::value<string>(&fsid),
2550 "fsid for new store created by mkfs")
2551 ("target-data-path", po::value<string>(&target_data_path),
2552 "path of target object store (for --op dup)")
2553 ("mountpoint", po::value<string>(&mountpoint),
2555 ("format", po::value<string>(&format)->default_value("json-pretty"),
2556 "Output format which may be json, json-pretty, xml, xml-pretty")
2557 ("debug", "Enable diagnostic output to stderr")
2558 ("force", "Ignore some types of errors and proceed with operation - USE WITH CAUTION: CORRUPTION POSSIBLE NOW OR IN THE FUTURE")
2559 ("skip-journal-replay", "Disable journal replay")
2560 ("skip-mount-omap", "Disable mounting of omap")
2561 ("head", "Find head/snapdir when searching for objects by name")
2562 ("dry-run", "Don't modify the objectstore")
2563 ("namespace", po::value<string>(&argnspace), "Specify namespace when searching for objects")
2566 po::options_description positional("Positional options");
2567 positional.add_options()
2568 ("object", po::value<string>(&object), "'' for pgmeta_oid, object name or ghobject in json")
2569 ("objcmd", po::value<string>(&objcmd), "command [(get|set)-bytes, (get|set|rm)-(attr|omap), (get|set)-omaphdr, list-attrs, list-omap, remove]")
2570 ("arg1", po::value<string>(&arg1), "arg1 based on cmd")
2571 ("arg2", po::value<string>(&arg2), "arg2 based on cmd")
2572 ("test-align", po::value<uint64_t>(&testalign)->default_value(0), "hidden align option for testing")
2575 po::options_description all("All options");
2576 all.add(desc).add(positional);
2578 po::positional_options_description pd;
2579 pd.add("object", 1).add("objcmd", 1).add("arg1", 1).add("arg2", 1);
2581 vector<string> ceph_option_strings;
2582 po::variables_map vm;
2584 po::parsed_options parsed =
2585 po::command_line_parser(argc, argv).options(all).allow_unregistered().positional(pd).run();
2586 po::store( parsed, vm);
2588 ceph_option_strings = po::collect_unrecognized(parsed.options,
2589 po::include_positional);
2590 } catch(po::error &e) {
2591 std::cerr << e.what() << std::endl;
2595 if (vm.count("help")) {
2600 debug = (vm.count("debug") > 0);
2602 force = (vm.count("force") > 0);
2604 if (vm.count("namespace"))
2607 dry_run = (vm.count("dry-run") > 0);
2609 osflagbits_t flags = 0;
2610 if (dry_run || vm.count("skip-journal-replay"))
2611 flags |= SKIP_JOURNAL_REPLAY;
2612 if (vm.count("skip-mount-omap"))
2613 flags |= SKIP_MOUNT_OMAP;
2614 if (op == "update-mon-db")
2615 flags |= SKIP_JOURNAL_REPLAY;
2617 head = (vm.count("head") > 0);
2619 vector<const char *> ceph_options;
2620 env_to_vec(ceph_options);
2621 ceph_options.reserve(ceph_options.size() + ceph_option_strings.size());
2622 for (vector<string>::iterator i = ceph_option_strings.begin();
2623 i != ceph_option_strings.end();
2625 ceph_options.push_back(i->c_str());
2629 snprintf(fn, sizeof(fn), "%s/type", dpath.c_str());
2630 int fd = ::open(fn, O_RDONLY);
2635 string dp_type = string(bl.c_str(), bl.length() - 1); // drop \n
2636 if (vm.count("type") && dp_type != "" && type != dp_type)
2637 cerr << "WARNING: Ignoring type \"" << type << "\" - found data-path type \""
2638 << dp_type << "\"" << std::endl;
2640 //cout << "object store type is " << type << std::endl;
2644 if (!vm.count("type") && type == "") {
2647 if (!vm.count("data-path") &&
2648 !(op == "dump-journal" && type == "filestore")) {
2649 cerr << "Must provide --data-path" << std::endl;
2653 if (type == "filestore" && !vm.count("journal-path")) {
2654 jpath = dpath + "/journal";
2656 if (!vm.count("op") && !vm.count("object")) {
2657 cerr << "Must provide --op or object command..." << std::endl;
2662 vm.count("op") && vm.count("object")) {
2663 cerr << "Can't specify both --op and object command syntax" << std::endl;
2667 if (op == "apply-layout-settings" && !(vm.count("pool") ^ vm.count("pgid"))) {
2668 cerr << "apply-layout-settings requires either --pool or --pgid"
2673 if (op != "list" && vm.count("object") && !vm.count("objcmd")) {
2674 cerr << "Invalid syntax, missing command" << std::endl;
2678 if (op == "fuse" && mountpoint.length() == 0) {
2679 cerr << "Missing fuse mountpoint" << std::endl;
2683 outistty = isatty(STDOUT_FILENO);
2686 if ((op == "export" || op == "export-remove" || op == "get-osdmap" || op == "get-inc-osdmap") && !dry_run) {
2687 if (!vm.count("file") || file == "-") {
2689 cerr << "stdout is a tty and no --file filename specified" << std::endl;
2692 file_fd = STDOUT_FILENO;
2694 file_fd = open(file.c_str(), O_WRONLY|O_CREAT|O_TRUNC, 0666);
2696 } else if (op == "import" || op == "set-osdmap" || op == "set-inc-osdmap") {
2697 if (!vm.count("file") || file == "-") {
2698 if (isatty(STDIN_FILENO)) {
2699 cerr << "stdin is a tty and no --file filename specified" << std::endl;
2702 file_fd = STDIN_FILENO;
2704 file_fd = open(file.c_str(), O_RDONLY);
2708 ObjectStoreTool tool = ObjectStoreTool(file_fd, dry_run);
2710 if (vm.count("file") && file_fd == fd_none && !dry_run) {
2711 cerr << "--file option only applies to import, export, export-remove, "
2712 << "get-osdmap, set-osdmap, get-inc-osdmap or set-inc-osdmap" << std::endl;
2716 if (file_fd != fd_none && file_fd < 0) {
2717 string err = string("file: ") + file;
2718 perror(err.c_str());
2722 auto cct = global_init(
2723 NULL, ceph_options, CEPH_ENTITY_TYPE_OSD,
2724 CODE_ENVIRONMENT_UTILITY_NODOUT, 0);
2725 //CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
2726 common_init_finish(g_ceph_context);
2727 g_conf = g_ceph_context->_conf;
2729 g_conf->set_val_or_die("log_to_stderr", "true");
2730 g_conf->set_val_or_die("err_to_stderr", "true");
2732 g_conf->apply_changes(NULL);
2734 // Special list handling. Treating pretty_format as human readable,
2735 // with one object per line and not an enclosing array.
2736 human_readable = ends_with(format, "-pretty");
2737 if ((op == "list" || op == "meta-list") && human_readable) {
2738 // Remove -pretty from end of format which we know is there
2739 format = format.substr(0, format.size() - strlen("-pretty"));
2742 formatter = Formatter::create(format);
2743 if (formatter == NULL) {
2744 cerr << "unrecognized format: " << format << std::endl;
2748 // Special handling for filestore journal, so we can dump it without mounting
2749 if (op == "dump-journal" && type == "filestore") {
2750 int ret = mydump_journal(formatter, jpath, g_conf->journal_dio);
2752 cerr << "journal-path: " << jpath << ": "
2753 << cpp_strerror(ret) << std::endl;
2756 formatter->flush(cout);
2760 //Verify that data-path really exists
2762 if (::stat(dpath.c_str(), &st) == -1) {
2763 string err = string("data-path: ") + dpath;
2764 perror(err.c_str());
2768 if (pgidstr.length() && !pgid.parse(pgidstr.c_str())) {
2769 cerr << "Invalid pgid '" << pgidstr << "' specified" << std::endl;
2773 //Verify that the journal-path really exists
2774 if (type == "filestore") {
2775 if (::stat(jpath.c_str(), &st) == -1) {
2776 string err = string("journal-path: ") + jpath;
2777 perror(err.c_str());
2780 if (S_ISDIR(st.st_mode)) {
2781 cerr << "journal-path: " << jpath << ": "
2782 << cpp_strerror(EISDIR) << std::endl;
2787 ObjectStore *fs = ObjectStore::create(g_ceph_context, type, dpath, jpath, flags);
2789 cerr << "Unable to create store of type " << type << std::endl;
2793 if (op == "fsck" || op == "fsck-deep") {
2794 int r = fs->fsck(op == "fsck-deep");
2796 cerr << "fsck failed: " << cpp_strerror(r) << std::endl;
2800 cerr << "fsck found " << r << " errors" << std::endl;
2803 cout << "fsck found no errors" << std::endl;
2806 if (op == "repair" || op == "repair-deep") {
2807 int r = fs->repair(op == "repair-deep");
2809 cerr << "repair failed: " << cpp_strerror(r) << std::endl;
2813 cerr << "repair found " << r << " errors" << std::endl;
2816 cout << "repair found no errors" << std::endl;
2820 if (fsid.length()) {
2822 bool r = f.parse(fsid.c_str());
2824 cerr << "failed to parse uuid '" << fsid << "'" << std::endl;
2831 cerr << "mkfs failed: " << cpp_strerror(r) << std::endl;
2839 snprintf(fn, sizeof(fn), "%s/type", target_data_path.c_str());
2840 int fd = ::open(fn, O_RDONLY);
2842 cerr << "Unable to open " << target_data_path << "/type" << std::endl;
2848 target_type = string(bl.c_str(), bl.length() - 1); // drop \n
2851 ObjectStore *targetfs = ObjectStore::create(
2852 g_ceph_context, target_type,
2853 target_data_path, "", 0);
2854 if (targetfs == NULL) {
2855 cerr << "Unable to open store of type " << target_type << std::endl;
2858 int r = dup(dpath, fs, target_data_path, targetfs);
2860 cerr << "dup failed: " << cpp_strerror(r) << std::endl;
2866 ObjectStore::Sequencer *osr = new ObjectStore::Sequencer(__func__);
2867 int ret = fs->mount();
2869 if (ret == -EBUSY) {
2870 cerr << "OSD has the store locked" << std::endl;
2872 cerr << "Mount failed with '" << cpp_strerror(ret) << "'" << std::endl;
2879 FuseStore fuse(fs, mountpoint);
2880 cout << "mounting fuse at " << mountpoint << " ..." << std::endl;
2881 int r = fuse.main();
2883 cerr << "failed to mount fuse: " << cpp_strerror(r) << std::endl;
2887 cerr << "fuse support not enabled" << std::endl;
2893 vector<coll_t>::iterator it;
2894 CompatSet supported;
2896 #ifdef INTERNAL_TEST
2897 supported = get_test_compat_set();
2899 supported = OSD::get_osd_compat_set();
2903 OSDSuperblock superblock;
2904 bufferlist::iterator p;
2905 ret = fs->read(coll_t::meta(), OSD_SUPERBLOCK_GOBJECT, 0, 0, bl);
2907 cerr << "Failure to read OSD superblock: " << cpp_strerror(ret) << std::endl;
2912 ::decode(superblock, p);
2915 cerr << "Cluster fsid=" << superblock.cluster_fsid << std::endl;
2919 cerr << "Supported features: " << supported << std::endl;
2920 cerr << "On-disk features: " << superblock.compat_features << std::endl;
2922 if (supported.compare(superblock.compat_features) == -1) {
2923 CompatSet unsupported = supported.unsupported(superblock.compat_features);
2924 cerr << "On-disk OSD incompatible features set "
2925 << unsupported << std::endl;
2930 if (op == "apply-layout-settings") {
2931 ret = apply_layout_settings(fs, superblock, pool, pgid, dry_run);
2935 if (op != "list" && vm.count("object")) {
2936 // Special case: Create pgmeta_oid if empty string specified
2937 // This can't conflict with any actual object names.
2939 ghobj = pgid.make_pgmeta_oid();
2941 json_spirit::Value v;
2943 if (!json_spirit::read(object, v) ||
2944 (v.type() != json_spirit::array_type && v.type() != json_spirit::obj_type)) {
2945 // Special: Need head/snapdir so set even if user didn't specify
2946 if (vm.count("objcmd") && (objcmd == "remove-clone-metadata"))
2948 lookup_ghobject lookup(object, nspace, head);
2949 if (action_on_all_objects(fs, lookup, debug)) {
2950 throw std::runtime_error("Internal error");
2952 if (lookup.size() != 1) {
2954 if (lookup.size() == 0)
2955 ss << "No object id '" << object << "' found or invalid JSON specified";
2957 ss << "Found " << lookup.size() << " objects with id '" << object
2958 << "', please use a JSON spec from --op list instead";
2959 throw std::runtime_error(ss.str());
2961 pair<coll_t, ghobject_t> found = lookup.pop();
2962 pgidstr = found.first.to_str();
2963 pgid.parse(pgidstr.c_str());
2964 ghobj = found.second;
2968 if (pgidstr.length() == 0 && v.type() != json_spirit::array_type) {
2969 ss << "Without --pgid the object '" << object
2970 << "' must be a JSON array";
2971 throw std::runtime_error(ss.str());
2973 if (v.type() == json_spirit::array_type) {
2974 json_spirit::Array array = v.get_array();
2975 if (array.size() != 2) {
2976 ss << "Object '" << object
2977 << "' must be a JSON array with 2 elements";
2978 throw std::runtime_error(ss.str());
2980 vector<json_spirit::Value>::iterator i = array.begin();
2981 assert(i != array.end());
2982 if (i->type() != json_spirit::str_type) {
2983 ss << "Object '" << object
2984 << "' must be a JSON array with the first element a string";
2985 throw std::runtime_error(ss.str());
2987 string object_pgidstr = i->get_str();
2988 if (object_pgidstr != "meta") {
2990 object_pgid.parse(object_pgidstr.c_str());
2991 if (pgidstr.length() > 0) {
2992 if (object_pgid != pgid) {
2993 ss << "object '" << object
2994 << "' has a pgid different from the --pgid="
2995 << pgidstr << " option";
2996 throw std::runtime_error(ss.str());
2999 pgidstr = object_pgidstr;
3003 pgidstr = object_pgidstr;
3010 } catch (std::runtime_error& e) {
3011 ss << "Decode object JSON error: " << e.what();
3012 throw std::runtime_error(ss.str());
3014 if (pgidstr != "meta" && (uint64_t)pgid.pgid.m_pool != (uint64_t)ghobj.hobj.pool) {
3015 cerr << "Object pool and pgid pool don't match" << std::endl;
3020 } catch (std::runtime_error& e) {
3021 cerr << e.what() << std::endl;
3028 // The ops which require --pgid option are checked here and
3029 // mentioned in the usage for --pgid.
3030 if ((op == "info" || op == "log" || op == "remove" || op == "export"
3031 || op == "export-remove" || op == "rm-past-intervals" || op == "mark-complete") &&
3032 pgidstr.length() == 0) {
3033 cerr << "Must provide pgid" << std::endl;
3039 if (op == "import") {
3042 ret = tool.do_import(fs, superblock, force, pgidstr, *osr);
3044 catch (const buffer::error &e) {
3045 cerr << "do_import threw exception error " << e.what() << std::endl;
3048 if (ret == -EFAULT) {
3049 cerr << "Corrupt input for import" << std::endl;
3052 cout << "Import successful" << std::endl;
3054 } else if (op == "dump-journal-mount") {
3055 // Undocumented feature to dump journal with mounted fs
3056 // This doesn't support the format option, but it uses the
3057 // ObjectStore::dump_journal() and mounts to get replay to run.
3058 ret = fs->dump_journal(cout);
3060 if (ret == -EOPNOTSUPP) {
3061 cerr << "Object store type \"" << type << "\" doesn't support journal dump" << std::endl;
3063 cerr << "Journal dump failed with error " << cpp_strerror(ret) << std::endl;
3067 } else if (op == "get-osdmap") {
3071 epoch = superblock.current_epoch;
3073 ret = get_osdmap(fs, epoch, osdmap, bl);
3075 cerr << "Failed to get osdmap#" << epoch << ": "
3076 << cpp_strerror(ret) << std::endl;
3079 ret = bl.write_fd(file_fd);
3081 cerr << "Failed to write to " << file << ": " << cpp_strerror(ret) << std::endl;
3083 cout << "osdmap#" << epoch << " exported." << std::endl;
3086 } else if (op == "set-osdmap") {
3088 ret = get_fd_data(file_fd, bl);
3090 cerr << "Failed to read osdmap " << cpp_strerror(ret) << std::endl;
3092 ret = set_osdmap(fs, epoch, bl, force, *osr);
3095 } else if (op == "get-inc-osdmap") {
3098 epoch = superblock.current_epoch;
3100 ret = get_inc_osdmap(fs, epoch, bl);
3102 cerr << "Failed to get incremental osdmap# " << epoch << ": "
3103 << cpp_strerror(ret) << std::endl;
3106 ret = bl.write_fd(file_fd);
3108 cerr << "Failed to write to " << file << ": " << cpp_strerror(ret) << std::endl;
3110 cout << "inc-osdmap#" << epoch << " exported." << std::endl;
3113 } else if (op == "set-inc-osdmap") {
3115 ret = get_fd_data(file_fd, bl);
3117 cerr << "Failed to read incremental osdmap " << cpp_strerror(ret) << std::endl;
3120 ret = set_inc_osdmap(fs, epoch, bl, force, *osr);
3123 } else if (op == "update-mon-db") {
3124 if (!vm.count("mon-store-path")) {
3125 cerr << "Please specify the path to monitor db to update" << std::endl;
3128 ret = update_mon_db(*fs, superblock, dpath + "/keyring", mon_store_path);
3133 log_oid = OSD::make_pg_log_oid(pgid);
3134 biginfo_oid = OSD::make_pg_biginfo_oid(pgid);
3136 if (op == "remove") {
3137 if (!force && !dry_run) {
3138 cerr << "Please use export-remove or you must use --force option" << std::endl;
3142 ret = initiate_new_remove_pg(fs, pgid, *osr);
3144 cerr << "PG '" << pgid << "' not found" << std::endl;
3147 cout << "Remove successful" << std::endl;
3151 if (op == "fix-lost") {
3152 boost::scoped_ptr<action_on_object_t> action;
3153 action.reset(new do_fix_lost(osr));
3154 if (pgidstr.length())
3155 ret = action_on_all_objects_in_exact_pg(fs, coll_t(pgid), *action, debug);
3157 ret = action_on_all_objects(fs, *action, debug);
3162 ret = do_list(fs, pgidstr, object, nspace, formatter, debug,
3163 human_readable, head);
3165 cerr << "do_list failed: " << cpp_strerror(ret) << std::endl;
3170 if (op == "dump-super") {
3171 formatter->open_object_section("superblock");
3172 superblock.dump(formatter);
3173 formatter->close_section();
3174 formatter->flush(cout);
3179 if (op == "meta-list") {
3180 ret = do_meta(fs, object, formatter, debug, human_readable);
3182 cerr << "do_meta failed: " << cpp_strerror(ret) << std::endl;
3187 ret = fs->list_collections(ls);
3189 cerr << "failed to list pgs: " << cpp_strerror(ret) << std::endl;
3193 if (debug && op == "list-pgs")
3194 cout << "Performing list-pgs operation" << std::endl;
3197 for (it = ls.begin(); it != ls.end(); ++it) {
3200 if (pgidstr == "meta") {
3201 if (it->to_str() == "meta")
3207 if (!it->is_pg(&tmppgid)) {
3211 if (it->is_temp(&tmppgid)) {
3215 if (op != "list-pgs" && tmppgid != pgid) {
3219 if (op != "list-pgs") {
3224 cout << tmppgid << std::endl;
3227 if (op == "list-pgs") {
3232 // If not an object command nor any of the ops handled below, then output this usage
3233 // before complaining about a bad pgid
3234 if (!vm.count("objcmd") && op != "export" && op != "export-remove" && op != "info" && op != "log" && op != "rm-past-intervals" && op != "mark-complete") {
3235 cerr << "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, "
3236 "get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete)"
3243 // The following code for export, info, log require omap or !skip-mount-omap
3244 if (it != ls.end()) {
3248 if (vm.count("objcmd")) {
3250 if (objcmd == "remove" || objcmd == "removeall") {
3251 bool all = (objcmd == "removeall");
3252 ret = do_remove_object(fs, coll, ghobj, all, force, *osr);
3254 } else if (objcmd == "list-attrs") {
3255 ret = do_list_attrs(fs, coll, ghobj);
3257 } else if (objcmd == "list-omap") {
3258 ret = do_list_omap(fs, coll, ghobj);
3260 } else if (objcmd == "get-bytes" || objcmd == "set-bytes") {
3261 if (objcmd == "get-bytes") {
3263 if (vm.count("arg1") == 0 || arg1 == "-") {
3266 fd = open(arg1.c_str(), O_WRONLY|O_TRUNC|O_CREAT|O_EXCL|O_LARGEFILE, 0666);
3268 cerr << "open " << arg1 << " " << cpp_strerror(errno) << std::endl;
3273 ret = do_get_bytes(fs, coll, ghobj, fd);
3274 if (fd != STDOUT_FILENO)
3278 if (vm.count("arg1") == 0 || arg1 == "-") {
3279 // Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
3280 if (isatty(STDIN_FILENO)) {
3281 cerr << "stdin is a tty and no file specified" << std::endl;
3287 fd = open(arg1.c_str(), O_RDONLY|O_LARGEFILE, 0666);
3289 cerr << "open " << arg1 << " " << cpp_strerror(errno) << std::endl;
3294 ret = do_set_bytes(fs, coll, ghobj, fd, *osr);
3295 if (fd != STDIN_FILENO)
3299 } else if (objcmd == "get-attr") {
3300 if (vm.count("arg1") == 0) {
3305 ret = do_get_attr(fs, coll, ghobj, arg1);
3307 } else if (objcmd == "set-attr") {
3308 if (vm.count("arg1") == 0) {
3314 if (vm.count("arg2") == 0 || arg2 == "-") {
3315 // Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
3316 if (isatty(STDIN_FILENO)) {
3317 cerr << "stdin is a tty and no file specified" << std::endl;
3323 fd = open(arg2.c_str(), O_RDONLY|O_LARGEFILE, 0666);
3325 cerr << "open " << arg2 << " " << cpp_strerror(errno) << std::endl;
3330 ret = do_set_attr(fs, coll, ghobj, arg1, fd, *osr);
3331 if (fd != STDIN_FILENO)
3334 } else if (objcmd == "rm-attr") {
3335 if (vm.count("arg1") == 0) {
3340 ret = do_rm_attr(fs, coll, ghobj, arg1, *osr);
3342 } else if (objcmd == "get-omap") {
3343 if (vm.count("arg1") == 0) {
3348 ret = do_get_omap(fs, coll, ghobj, arg1);
3350 } else if (objcmd == "set-omap") {
3351 if (vm.count("arg1") == 0) {
3357 if (vm.count("arg2") == 0 || arg2 == "-") {
3358 // Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
3359 if (isatty(STDIN_FILENO)) {
3360 cerr << "stdin is a tty and no file specified" << std::endl;
3366 fd = open(arg2.c_str(), O_RDONLY|O_LARGEFILE, 0666);
3368 cerr << "open " << arg2 << " " << cpp_strerror(errno) << std::endl;
3373 ret = do_set_omap(fs, coll, ghobj, arg1, fd, *osr);
3374 if (fd != STDIN_FILENO)
3377 } else if (objcmd == "rm-omap") {
3378 if (vm.count("arg1") == 0) {
3383 ret = do_rm_omap(fs, coll, ghobj, arg1, *osr);
3385 } else if (objcmd == "get-omaphdr") {
3386 if (vm.count("arg1")) {
3391 ret = do_get_omaphdr(fs, coll, ghobj);
3393 } else if (objcmd == "set-omaphdr") {
3395 if (vm.count("arg2")) {
3401 if (vm.count("arg1") == 0 || arg1 == "-") {
3402 // Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
3403 if (isatty(STDIN_FILENO)) {
3404 cerr << "stdin is a tty and no file specified" << std::endl;
3410 fd = open(arg1.c_str(), O_RDONLY|O_LARGEFILE, 0666);
3412 cerr << "open " << arg1 << " " << cpp_strerror(errno) << std::endl;
3417 ret = do_set_omaphdr(fs, coll, ghobj, fd, *osr);
3418 if (fd != STDIN_FILENO)
3421 } else if (objcmd == "dump") {
3422 // There should not be any other arguments
3423 if (vm.count("arg1") || vm.count("arg2")) {
3428 ret = print_obj_info(fs, coll, ghobj, formatter);
3430 } else if (objcmd == "set-size" || objcmd == "corrupt-size") {
3431 // Undocumented testing feature
3432 bool corrupt = (objcmd == "corrupt-size");
3434 if (vm.count("arg1") == 0 || vm.count("arg2")) {
3439 if (arg1.length() == 0 || !isdigit(arg1.c_str()[0])) {
3440 cerr << "Invalid size '" << arg1 << "' specified" << std::endl;
3444 uint64_t size = atoll(arg1.c_str());
3445 ret = set_size(fs, coll, ghobj, size, formatter, *osr, corrupt);
3447 } else if (objcmd == "clear-snapset") {
3448 // UNDOCUMENTED: For testing zap SnapSet
3449 // IGNORE extra args since not in usage anyway
3450 if (!ghobj.hobj.has_snapset()) {
3451 cerr << "'" << objcmd << "' requires a head or snapdir object" << std::endl;
3455 ret = clear_snapset(fs, coll, ghobj, arg1, *osr);
3457 } else if (objcmd == "remove-clone-metadata") {
3459 if (vm.count("arg1") == 0 || vm.count("arg2")) {
3464 if (!ghobj.hobj.has_snapset()) {
3465 cerr << "'" << objcmd << "' requires a head or snapdir object" << std::endl;
3469 if (arg1.length() == 0 || !isdigit(arg1.c_str()[0])) {
3470 cerr << "Invalid cloneid '" << arg1 << "' specified" << std::endl;
3474 snapid_t cloneid = atoi(arg1.c_str());
3475 ret = remove_clone(fs, coll, ghobj, cloneid, force, *osr);
3478 cerr << "Unknown object command '" << objcmd << "'" << std::endl;
3486 ret = PG::peek_map_epoch(fs, pgid, &map_epoch, &bl);
3488 cerr << "peek_map_epoch reports error" << std::endl;
3490 cerr << "map_epoch " << map_epoch << std::endl;
3492 pg_info_t info(pgid);
3493 PastIntervals past_intervals;
3495 ret = PG::read_info(fs, pgid, coll, bl, info, past_intervals,
3498 cerr << "read_info error " << cpp_strerror(ret) << std::endl;
3501 if (struct_ver < PG::compat_struct_v) {
3502 cerr << "PG is too old to upgrade, use older Ceph version" << std::endl;
3507 cerr << "struct_v " << (int)struct_ver << std::endl;
3509 if (op == "export" || op == "export-remove") {
3510 ret = tool.do_export(fs, coll, pgid, info, map_epoch, struct_ver, superblock, past_intervals);
3512 cerr << "Export successful" << std::endl;
3513 if (op == "export-remove") {
3514 ret = initiate_new_remove_pg(fs, pgid, *osr);
3515 // Export succeeded, so pgid is there
3517 cerr << "Remove successful" << std::endl;
3520 } else if (op == "info") {
3521 formatter->open_object_section("info");
3522 info.dump(formatter);
3523 formatter->close_section();
3524 formatter->flush(cout);
3526 } else if (op == "log") {
3527 PGLog::IndexedLog log;
3528 pg_missing_t missing;
3529 ret = get_log(fs, struct_ver, coll, pgid, info, log, missing);
3533 dump_log(formatter, cout, log, missing);
3534 } else if (op == "rm-past-intervals") {
3535 ObjectStore::Transaction tran;
3536 ObjectStore::Transaction *t = &tran;
3538 if (struct_ver < PG::compat_struct_v) {
3539 cerr << "Can't remove past-intervals, version mismatch " << (int)struct_ver
3540 << " (pg) < compat " << (int)PG::compat_struct_v << " (tool)"
3546 cout << "Remove past-intervals " << past_intervals << std::endl;
3548 past_intervals.clear();
3553 ret = write_info(*t, map_epoch, info, past_intervals);
3556 fs->apply_transaction(osr, std::move(*t));
3557 cout << "Removal succeeded" << std::endl;
3559 } else if (op == "mark-complete") {
3560 ObjectStore::Transaction tran;
3561 ObjectStore::Transaction *t = &tran;
3563 if (struct_ver < PG::compat_struct_v) {
3564 cerr << "Can't mark-complete, version mismatch " << (int)struct_ver
3565 << " (pg) < compat " << (int)PG::compat_struct_v << " (tool)"
3571 cout << "Marking complete " << std::endl;
3573 info.last_update = eversion_t(superblock.current_epoch, info.last_update.version + 1);
3574 info.last_backfill = hobject_t::get_max();
3575 info.last_epoch_started = superblock.current_epoch;
3576 info.history.last_epoch_started = superblock.current_epoch;
3577 info.history.last_epoch_clean = superblock.current_epoch;
3578 past_intervals.clear();
3581 ret = write_info(*t, map_epoch, info, past_intervals);
3584 fs->apply_transaction(osr, std::move(*t));
3586 cout << "Marking complete succeeded" << std::endl;
3588 assert(!"Should have already checked for valid --op");
3591 cerr << "PG '" << pgid << "' not found" << std::endl;
3596 int r = fs->umount();
3599 cerr << "umount failed: " << cpp_strerror(r) << std::endl;
3600 // If no previous error, then use umount() error
3606 // Export output can go to stdout, so put this message on stderr
3608 cerr << "dry-run: Nothing changed" << std::endl;
3610 cout << "dry-run: Nothing changed" << std::endl;