// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2014 Red Hat
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 */

#ifndef CEPH_OSD_BLUESTORE_BLUESTORE_TYPES_H
#define CEPH_OSD_BLUESTORE_BLUESTORE_TYPES_H

#include "include/types.h"
#include "include/interval_set.h"
#include "include/utime.h"
#include "common/hobject.h"
#include "compressor/Compressor.h"
#include "common/Checksummer.h"
#include "include/mempool.h"

/// label for block device
struct bluestore_bdev_label_t {
  uuid_d osd_uuid;          ///< osd uuid
  uint64_t size;            ///< device size
  utime_t btime;            ///< birth time
  string description;       ///< device description

  map<string,string> meta;  ///< {read,write}_meta() content from ObjectStore

  void encode(bufferlist& bl) const;
  void decode(bufferlist::iterator& p);
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_bdev_label_t*>& o);
};
WRITE_CLASS_ENCODER(bluestore_bdev_label_t)

ostream& operator<<(ostream& out, const bluestore_bdev_label_t& l);

/// collection metadata
struct bluestore_cnode_t {
  uint32_t bits;   ///< how many bits of coll pgid are significant

  explicit bluestore_cnode_t(int b=0) : bits(b) {}

  DENC(bluestore_cnode_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.bits, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_cnode_t*>& o);
};
WRITE_CLASS_DENC(bluestore_cnode_t)

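// Illustrative note (not part of the original header): bits describes how
// many low-order bits of an object's hash are significant for membership
// in this collection; e.g. with bits == 6, objects whose hash matches the
// collection's pgid in its low 6 bits belong here, and splitting the
// collection increases bits by one.
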
struct AllocExtent;
typedef mempool::bluestore_alloc::vector<AllocExtent> AllocExtentVector;

struct AllocExtent {
  int64_t offset;
  int32_t length;

  AllocExtent() : offset(0), length(0) { }
  AllocExtent(int64_t off, int32_t len) : offset(off), length(len) { }

  uint64_t end() const {
    return offset + length;
  }

  bool operator==(const AllocExtent& other) const {
    return offset == other.offset && length == other.length;
  }
};

inline static ostream& operator<<(ostream& out, const AllocExtent& e) {
  return out << "0x" << std::hex << e.offset << "~" << e.length << std::dec;
}

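// For example, AllocExtent(0x10000, 0x2000) prints as "0x10000~2000"
// (both fields in hex, per the operator<< above).
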
class ExtentList {
  AllocExtentVector *m_extents;
  int64_t m_block_size;
  int64_t m_max_blocks;

public:
  void init(AllocExtentVector *extents, int64_t block_size,
            uint64_t max_alloc_size) {
    m_extents = extents;
    m_block_size = block_size;
    m_max_blocks = max_alloc_size / block_size;
    assert(m_extents->empty());
  }

  ExtentList(AllocExtentVector *extents, int64_t block_size) {
    init(extents, block_size, 0);
  }

  ExtentList(AllocExtentVector *extents, int64_t block_size,
             uint64_t max_alloc_size) {
    init(extents, block_size, max_alloc_size);
  }

  void add_extents(int64_t start, int64_t count);

  AllocExtentVector *get_extents() {
    return m_extents;
  }

  std::pair<int64_t, int64_t> get_nth_extent(int index) {
    return std::make_pair
      ((*m_extents)[index].offset / m_block_size,
       (*m_extents)[index].length / m_block_size);
  }

  int64_t get_extent_count() {
    return m_extents->size();
  }
};

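// Usage sketch (illustrative; normally the block allocator drives this,
// and block/byte values here are hypothetical):
//
//   AllocExtentVector v;
//   ExtentList el(&v, 4096);   // 4 KiB block size, no max_alloc_size cap
//   el.add_extents(16, 8);     // blocks [16,24) -> one extent 0x10000~0x8000
//   std::pair<int64_t,int64_t> e = el.get_nth_extent(0);  // {16, 8} in blocks
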
/// pextent: physical extent
struct bluestore_pextent_t : public AllocExtent {
  const static uint64_t INVALID_OFFSET = ~0ull;

  bluestore_pextent_t() : AllocExtent() {}
  bluestore_pextent_t(uint64_t o, uint64_t l) : AllocExtent(o, l) {}
  bluestore_pextent_t(const AllocExtent &ext) :
    AllocExtent(ext.offset, ext.length) { }

  bluestore_pextent_t& operator=(const AllocExtent &ext) {
    offset = ext.offset;
    length = ext.length;
    return *this;
  }

  bool is_valid() const {
    return offset != INVALID_OFFSET;
  }

  DENC(bluestore_pextent_t, v, p) {
    denc_lba(v.offset, p);
    denc_varint_lowz(v.length, p);
  }

  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_pextent_t*>& ls);
};
WRITE_CLASS_DENC(bluestore_pextent_t)

ostream& operator<<(ostream& out, const bluestore_pextent_t& o);

typedef mempool::bluestore_cache_other::vector<bluestore_pextent_t> PExtentVector;

template<>
struct denc_traits<PExtentVector> {
  static constexpr bool supported = true;
  static constexpr bool bounded = false;
  static constexpr bool featured = false;
  static constexpr bool need_contiguous = true;
  static void bound_encode(const PExtentVector& v, size_t& p) {
    p += sizeof(uint32_t);
    const auto size = v.size();
    if (size) {
      size_t per = 0;
      denc(v.front(), per);
      p += per * size;
    }
  }
  static void encode(const PExtentVector& v,
                     bufferlist::contiguous_appender& p) {
    denc_varint(v.size(), p);
    for (auto& i : v) {
      denc(i, p);
    }
  }
  static void decode(PExtentVector& v, bufferptr::iterator& p) {
    unsigned num;
    denc_varint(num, p);
    v.clear();
    v.resize(num);
    for (unsigned i = 0; i < num; ++i) {
      denc(v[i], p);
    }
  }
};

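// Sketch (illustrative): this specialization lets PExtentVector participate
// in the generic denc() machinery, e.g. to estimate an encoding bound:
//
//   PExtentVector ev = { bluestore_pextent_t(0x10000, 0x8000) };
//   size_t bound = 0;
//   denc(ev, bound);  // dispatches to denc_traits<PExtentVector>::bound_encode
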
/// extent_map: a map of reference counted extents
struct bluestore_extent_ref_map_t {
  struct record_t {
    uint32_t length;
    uint32_t refs;
    record_t(uint32_t l=0, uint32_t r=0) : length(l), refs(r) {}
    DENC(bluestore_extent_ref_map_t::record_t, v, p) {
      denc_varint_lowz(v.length, p);
      denc_varint(v.refs, p);
    }
  };

  typedef mempool::bluestore_cache_other::map<uint64_t,record_t> map_t;
  map_t ref_map;

  void _maybe_merge_left(map_t::iterator& p);

  bool empty() const {
    return ref_map.empty();
  }

  void get(uint64_t offset, uint32_t len);
  void put(uint64_t offset, uint32_t len, PExtentVector *release,
           bool *maybe_unshared);

  bool contains(uint64_t offset, uint32_t len) const;
  bool intersects(uint64_t offset, uint32_t len) const;

  void bound_encode(size_t& p) const {
    denc_varint((uint32_t)0, p);
    if (!ref_map.empty()) {
      size_t elem_size = 0;
      denc_varint_lowz((uint64_t)0, elem_size);
      ref_map.begin()->second.bound_encode(elem_size);
      p += elem_size * ref_map.size();
    }
  }
  void encode(bufferlist::contiguous_appender& p) const {
    uint32_t n = ref_map.size();
    denc_varint(n, p);
    if (n) {
      auto i = ref_map.begin();
      denc_varint_lowz(i->first, p);
      i->second.encode(p);
      int64_t pos = i->first;
      while (--n) {
        ++i;
        denc_varint_lowz((int64_t)i->first - pos, p);
        i->second.encode(p);
        pos = i->first;
      }
    }
  }
  void decode(bufferptr::iterator& p) {
    uint32_t n;
    denc_varint(n, p);
    if (n) {
      int64_t pos;
      denc_varint_lowz(pos, p);
      ref_map[pos].decode(p);
      while (--n) {
        int64_t delta;
        denc_varint_lowz(delta, p);
        pos += delta;
        ref_map[pos].decode(p);
      }
    }
  }

  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_extent_ref_map_t*>& o);
};
WRITE_CLASS_DENC(bluestore_extent_ref_map_t)

ostream& operator<<(ostream& out, const bluestore_extent_ref_map_t& rm);

static inline bool operator==(const bluestore_extent_ref_map_t::record_t& l,
                              const bluestore_extent_ref_map_t::record_t& r) {
  return l.length == r.length && l.refs == r.refs;
}
static inline bool operator==(const bluestore_extent_ref_map_t& l,
                              const bluestore_extent_ref_map_t& r) {
  return l.ref_map == r.ref_map;
}
static inline bool operator!=(const bluestore_extent_ref_map_t& l,
                              const bluestore_extent_ref_map_t& r) {
  return !(l == r);
}

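// Semantics sketch (illustrative offsets/lengths): two users sharing one
// 64 KiB extent at blob offset 0x0:
//
//   bluestore_extent_ref_map_t m;
//   m.get(0x0, 0x10000);            // first reference
//   m.get(0x0, 0x10000);            // second reference (e.g. a clone)
//   PExtentVector release;
//   bool maybe_unshared;
//   m.put(0x0, 0x10000, &release, &maybe_unshared);
//   // refs drop to 1; nothing is appended to 'release' until the last
//   // reference to a region goes away.
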
/// blob_use_tracker: a set of per-alloc unit ref counters to track blob usage
struct bluestore_blob_use_tracker_t {
  // N.B.: There is no need to minimize au_size/num_au
  // as much as possible (e.g. have just a single byte for au_size) since:
  // 1) the struct isn't packed, hence it's padded; and even if it were
  //    packed, see 2)
  // 2) the memory manager has its own granularity, most probably >= 8 bytes
  //
  uint32_t au_size;  // Allocation (=tracking) unit size,
                     // == 0 if uninitialized
  uint32_t num_au;   // Amount of allocation units tracked
                     // == 0 if single unit or the whole blob is tracked

  union {
    uint32_t* bytes_per_au;
    uint32_t total_bytes;
  };

  bluestore_blob_use_tracker_t()
    : au_size(0), num_au(0), bytes_per_au(nullptr) {
  }
  ~bluestore_blob_use_tracker_t() {
    clear();
  }

  void clear() {
    if (num_au != 0) {
      delete[] bytes_per_au;
    }
    bytes_per_au = nullptr;
    au_size = 0;
    num_au = 0;
  }

  uint32_t get_referenced_bytes() const {
    uint32_t total = 0;
    if (!num_au) {
      total = total_bytes;
    } else {
      for (size_t i = 0; i < num_au; ++i) {
        total += bytes_per_au[i];
      }
    }
    return total;
  }
  bool is_not_empty() const {
    if (!num_au) {
      return total_bytes != 0;
    } else {
      for (size_t i = 0; i < num_au; ++i) {
        if (bytes_per_au[i]) {
          return true;
        }
      }
    }
    return false;
  }
  bool is_empty() const {
    return !is_not_empty();
  }
  void prune_tail(uint32_t new_len) {
    if (num_au) {
      new_len = ROUND_UP_TO(new_len, au_size);
      uint32_t _num_au = new_len / au_size;
      assert(_num_au <= num_au);
      if (_num_au) {
        num_au = _num_au; // bytes_per_au array is left unmodified
      }
    }
  }
  void add_tail(uint32_t new_len, uint32_t _au_size) {
    auto full_size = au_size * (num_au ? num_au : 1);
    assert(new_len >= full_size);
    if (new_len == full_size) {
      return;
    }
    if (!num_au) {
      uint32_t old_total = total_bytes;
      total_bytes = 0;
      init(new_len, _au_size);
      assert(num_au);
      bytes_per_au[0] = old_total;
    } else {
      assert(_au_size == au_size);
      new_len = ROUND_UP_TO(new_len, au_size);
      uint32_t _num_au = new_len / au_size;
      assert(_num_au >= num_au);
      if (_num_au > num_au) {
        auto old_bytes = bytes_per_au;
        auto old_num_au = num_au;
        num_au = _num_au;
        allocate();
        for (size_t i = 0; i < old_num_au; i++) {
          bytes_per_au[i] = old_bytes[i];
        }
        for (size_t i = old_num_au; i < num_au; i++) {
          bytes_per_au[i] = 0;
        }
        delete[] old_bytes;
      }
    }
  }

  void init(
    uint32_t full_length,
    uint32_t _au_size);

  void get(
    uint32_t offset,
    uint32_t len);

  /// put: return true if the blob has no references any more after the call;
  /// in that case *release is not filled, for the sake of performance.
  /// Return false if some references to the blob remain; in that case
  /// *release contains pextents (identified by their offsets relative to
  /// the blob start) that are no longer used and can be safely deallocated.
  bool put(
    uint32_t offset,
    uint32_t len,
    PExtentVector *release);

  bool can_split() const;
  bool can_split_at(uint32_t blob_offset) const;
  void split(
    uint32_t blob_offset,
    bluestore_blob_use_tracker_t* r);

  bool equal(
    const bluestore_blob_use_tracker_t& other) const;

  void bound_encode(size_t& p) const {
    denc_varint(au_size, p);
    if (au_size) {
      denc_varint(num_au, p);
      if (!num_au) {
        denc_varint(total_bytes, p);
      } else {
        size_t elem_size = 0;
        denc_varint((uint32_t)0, elem_size);
        p += elem_size * num_au;
      }
    }
  }
  void encode(bufferlist::contiguous_appender& p) const {
    denc_varint(au_size, p);
    if (au_size) {
      denc_varint(num_au, p);
      if (!num_au) {
        denc_varint(total_bytes, p);
      } else {
        size_t elem_size = 0;
        denc_varint((uint32_t)0, elem_size);
        for (size_t i = 0; i < num_au; ++i) {
          denc_varint(bytes_per_au[i], p);
        }
      }
    }
  }
  void decode(bufferptr::iterator& p) {
    clear();
    denc_varint(au_size, p);
    if (au_size) {
      denc_varint(num_au, p);
      if (!num_au) {
        denc_varint(total_bytes, p);
      } else {
        allocate();
        for (size_t i = 0; i < num_au; ++i) {
          denc_varint(bytes_per_au[i], p);
        }
      }
    }
  }

  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_blob_use_tracker_t*>& o);
private:
  void allocate();
};
WRITE_CLASS_DENC(bluestore_blob_use_tracker_t)
ostream& operator<<(ostream& out, const bluestore_blob_use_tracker_t& rm);

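// Tracking sketch (illustrative sizes): a 128 KiB blob tracked at 64 KiB
// allocation units has num_au == 2, so referencing bytes in the first AU
// only touches bytes_per_au[0]. A blob with num_au == 0 keeps one aggregate
// counter in total_bytes; the anonymous union overlays the two forms.
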
/// blob: a piece of data on disk
struct bluestore_blob_t {
private:
  PExtentVector extents;            ///< raw data position on device
  uint32_t logical_length = 0;      ///< original length of data stored in the blob
  uint32_t compressed_length = 0;   ///< compressed length if any

public:
  enum {
    LEGACY_FLAG_MUTABLE = 1,  ///< [legacy] blob can be overwritten or split
    FLAG_COMPRESSED = 2,      ///< blob is compressed
    FLAG_CSUM = 4,            ///< blob has checksums
    FLAG_HAS_UNUSED = 8,      ///< blob has unused map
    FLAG_SHARED = 16,         ///< blob is shared; see external SharedBlob
  };
  static string get_flags_string(unsigned flags);

  uint32_t flags = 0;         ///< FLAG_*

  typedef uint16_t unused_t;
  unused_t unused = 0;        ///< portion that has never been written to (bitmap)

  uint8_t csum_type = Checksummer::CSUM_NONE;  ///< CSUM_*
  uint8_t csum_chunk_order = 0;  ///< csum block size is 1<<csum_chunk_order bytes

  bufferptr csum_data;        ///< opaque vector of csum data

  bluestore_blob_t(uint32_t f = 0) : flags(f) {}

  const PExtentVector& get_extents() const {
    return extents;
  }

  DENC_HELPERS;
  void bound_encode(size_t& p, uint64_t struct_v) const {
    assert(struct_v == 1 || struct_v == 2);
    denc(extents, p);
    denc_varint(flags, p);
    denc_varint_lowz(logical_length, p);
    denc_varint_lowz(compressed_length, p);
    denc(csum_type, p);
    denc(csum_chunk_order, p);
    denc_varint(csum_data.length(), p);
    p += csum_data.length();
    p += sizeof(unused_t);
  }

  void encode(bufferlist::contiguous_appender& p, uint64_t struct_v) const {
    assert(struct_v == 1 || struct_v == 2);
    denc(extents, p);
    denc_varint(flags, p);
    if (is_compressed()) {
      denc_varint_lowz(logical_length, p);
      denc_varint_lowz(compressed_length, p);
    }
    if (has_csum()) {
      denc(csum_type, p);
      denc(csum_chunk_order, p);
      denc_varint(csum_data.length(), p);
      memcpy(p.get_pos_add(csum_data.length()), csum_data.c_str(),
             csum_data.length());
    }
    if (has_unused()) {
      denc(unused, p);
    }
  }

  void decode(bufferptr::iterator& p, uint64_t struct_v) {
    assert(struct_v == 1 || struct_v == 2);
    denc(extents, p);
    denc_varint(flags, p);
    if (is_compressed()) {
      denc_varint_lowz(logical_length, p);
      denc_varint_lowz(compressed_length, p);
    } else {
      logical_length = get_ondisk_length();
    }
    if (has_csum()) {
      denc(csum_type, p);
      denc(csum_chunk_order, p);
      uint32_t len;
      denc_varint(len, p);
      csum_data = p.get_ptr(len);
      csum_data.reassign_to_mempool(mempool::mempool_bluestore_cache_other);
    }
    if (has_unused()) {
      denc(unused, p);
    }
  }

  bool can_split() const {
    return
      !has_flag(FLAG_SHARED) &&
      !has_flag(FLAG_COMPRESSED) &&
      !has_flag(FLAG_HAS_UNUSED);  // splitting unused set is complex
  }
  bool can_split_at(uint32_t blob_offset) const {
    return !has_csum() || blob_offset % get_csum_chunk_size() == 0;
  }

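  // E.g. with csum_chunk_order == 12 (4 KiB csum chunks), can_split_at()
  // only permits splits at 4 KiB-aligned blob offsets, so every checksum
  // still covers a whole chunk on both sides of the split.
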
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_blob_t*>& ls);

  bool has_flag(unsigned f) const {
    return flags & f;
  }
  void set_flag(unsigned f) {
    flags |= f;
  }
  void clear_flag(unsigned f) {
    flags &= ~f;
  }
  string get_flags_string() const {
    return get_flags_string(flags);
  }

  void set_compressed(uint64_t clen_orig, uint64_t clen) {
    set_flag(FLAG_COMPRESSED);
    logical_length = clen_orig;
    compressed_length = clen;
  }
  bool is_mutable() const {
    return !is_compressed() && !is_shared();
  }
  bool is_compressed() const {
    return has_flag(FLAG_COMPRESSED);
  }
  bool has_csum() const {
    return has_flag(FLAG_CSUM);
  }
  bool has_unused() const {
    return has_flag(FLAG_HAS_UNUSED);
  }
  bool is_shared() const {
    return has_flag(FLAG_SHARED);
  }

  /// return chunk (i.e. min readable block) size for the blob
  uint64_t get_chunk_size(uint64_t dev_block_size) const {
    return has_csum() ?
      MAX(dev_block_size, get_csum_chunk_size()) : dev_block_size;
  }
  uint32_t get_csum_chunk_size() const {
    return 1 << csum_chunk_order;
  }
  uint32_t get_compressed_payload_length() const {
    return is_compressed() ? compressed_length : 0;
  }
  uint64_t calc_offset(uint64_t x_off, uint64_t *plen) const {
    auto p = extents.begin();
    assert(p != extents.end());
    while (x_off >= p->length) {
      x_off -= p->length;
      ++p;
      assert(p != extents.end());
    }
    if (plen)
      *plen = p->length - x_off;
    return p->offset + x_off;
  }

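  // Example (hypothetical layout): extents = [0x10000~0x8000, 0x40000~0x8000].
  // calc_offset(0x9000, &l) skips the first 0x8000-byte extent, lands 0x1000
  // into the second, and returns 0x41000 with *plen == 0x7000.
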
  // validate whether or not the status of pextents within the given range
  // meets the requirement (allocated or unallocated).
  bool _validate_range(uint64_t b_off, uint64_t b_len,
                       bool require_allocated) const {
    auto p = extents.begin();
    assert(p != extents.end());
    while (b_off >= p->length) {
      b_off -= p->length;
      ++p;
      assert(p != extents.end());
    }

    b_len += b_off;
    while (b_len) {
      assert(p != extents.end());
      if (require_allocated != p->is_valid()) {
        return false;
      }

      if (p->length >= b_len) {
        return true;
      }
      b_len -= p->length;
      ++p;
    }
    assert(0 == "we should not get here");
  }

  /// return true if the entire range is allocated
  /// (mapped to extents on disk)
  bool is_allocated(uint64_t b_off, uint64_t b_len) const {
    return _validate_range(b_off, b_len, true);
  }

  /// return true if the entire range is unallocated
  /// (not mapped to extents on disk)
  bool is_unallocated(uint64_t b_off, uint64_t b_len) const {
    return _validate_range(b_off, b_len, false);
  }

  /// return true if the logical range has never been used
  bool is_unused(uint64_t offset, uint64_t length) const {
    if (!has_unused()) {
      return false;
    }
    uint64_t blob_len = get_logical_length();
    assert((blob_len % (sizeof(unused)*8)) == 0);
    assert(offset + length <= blob_len);
    uint64_t chunk_size = blob_len / (sizeof(unused)*8);
    uint64_t start = offset / chunk_size;
    uint64_t end = ROUND_UP_TO(offset + length, chunk_size) / chunk_size;
    auto i = start;
    while (i < end && (unused & (1u << i))) {
      i++;
    }
    return i >= end;
  }

  /// mark a range that has never been used
  void add_unused(uint64_t offset, uint64_t length) {
    uint64_t blob_len = get_logical_length();
    assert((blob_len % (sizeof(unused)*8)) == 0);
    assert(offset + length <= blob_len);
    uint64_t chunk_size = blob_len / (sizeof(unused)*8);
    uint64_t start = ROUND_UP_TO(offset, chunk_size) / chunk_size;
    uint64_t end = (offset + length) / chunk_size;
    for (auto i = start; i < end; ++i) {
      unused |= (1u << i);
    }
    if (start != end) {
      set_flag(FLAG_HAS_UNUSED);
    }
  }

  /// indicate that a range has (now) been used.
  void mark_used(uint64_t offset, uint64_t length) {
    if (has_unused()) {
      uint64_t blob_len = get_logical_length();
      assert((blob_len % (sizeof(unused)*8)) == 0);
      assert(offset + length <= blob_len);
      uint64_t chunk_size = blob_len / (sizeof(unused)*8);
      uint64_t start = offset / chunk_size;
      uint64_t end = ROUND_UP_TO(offset + length, chunk_size) / chunk_size;
      for (auto i = start; i < end; ++i) {
        unused &= ~(1u << i);
      }
      if (!unused) {
        clear_flag(FLAG_HAS_UNUSED);
      }
    }
  }

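  // Bitmap arithmetic (illustrative): for a 64 KiB blob, sizeof(unused)*8
  // == 16 bits, so each bit covers one 4 KiB chunk. add_unused(0x0, 0x800)
  // sets no bits (no whole chunk is covered once start is rounded up),
  // while add_unused(0x0, 0x1000) sets bit 0; a later mark_used(0x0, 0x200)
  // clears bit 0 again, since any write into a chunk makes the whole chunk
  // count as used.
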
  int map(uint64_t x_off, uint64_t x_len,
          std::function<int(uint64_t,uint64_t)> f) const {
    auto p = extents.begin();
    assert(p != extents.end());
    while (x_off >= p->length) {
      x_off -= p->length;
      ++p;
      assert(p != extents.end());
    }
    while (x_len > 0) {
      assert(p != extents.end());
      uint64_t l = MIN(p->length - x_off, x_len);
      int r = f(p->offset + x_off, l);
      if (r < 0)
        return r;
      x_off = 0;
      x_len -= l;
      ++p;
    }
    return 0;
  }
  void map_bl(uint64_t x_off,
              bufferlist& bl,
              std::function<void(uint64_t,bufferlist&)> f) const {
    auto p = extents.begin();
    assert(p != extents.end());
    while (x_off >= p->length) {
      x_off -= p->length;
      ++p;
      assert(p != extents.end());
    }
    bufferlist::iterator it = bl.begin();
    uint64_t x_len = bl.length();
    while (x_len > 0) {
      assert(p != extents.end());
      uint64_t l = MIN(p->length - x_off, x_len);
      bufferlist t;
      it.copy(l, t);
      f(p->offset + x_off, t);
      x_off = 0;
      x_len -= l;
      ++p;
    }
  }

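  // Usage sketch (illustrative): issue one I/O per physical extent backing
  // a logical blob range:
  //
  //   int r = blob.map(0x0, 0x4000,
  //     [&](uint64_t p_off, uint64_t p_len) {
  //       // p_off/p_len are device offsets; e.g. queue an aio read here
  //       return 0;             // a negative return aborts the walk
  //     });
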
  uint32_t get_ondisk_length() const {
    uint32_t len = 0;
    for (auto &p : extents) {
      len += p.length;
    }
    return len;
  }

  uint32_t get_logical_length() const {
    return logical_length;
  }
  size_t get_csum_value_size() const;

  size_t get_csum_count() const {
    size_t vs = get_csum_value_size();
    if (!vs)
      return 0;
    return csum_data.length() / vs;
  }

  uint64_t get_csum_item(unsigned i) const {
    size_t cs = get_csum_value_size();
    const char *p = csum_data.c_str();
    switch (cs) {
    case 0:
      assert(0 == "no csum data, bad index");
    case 1:
      return reinterpret_cast<const uint8_t*>(p)[i];
    case 2:
      return reinterpret_cast<const __le16*>(p)[i];
    case 4:
      return reinterpret_cast<const __le32*>(p)[i];
    case 8:
      return reinterpret_cast<const __le64*>(p)[i];
    default:
      assert(0 == "unrecognized csum word size");
      return 0;
    }
  }

  const char *get_csum_item_ptr(unsigned i) const {
    size_t cs = get_csum_value_size();
    return csum_data.c_str() + (cs * i);
  }
  char *get_csum_item_ptr(unsigned i) {
    size_t cs = get_csum_value_size();
    return csum_data.c_str() + (cs * i);
  }

  void init_csum(unsigned type, unsigned order, unsigned len) {
    flags |= FLAG_CSUM;
    csum_type = type;
    csum_chunk_order = order;
    csum_data = buffer::create(get_csum_value_size() * len / get_csum_chunk_size());
    csum_data.zero();
    csum_data.reassign_to_mempool(mempool::mempool_bluestore_cache_other);
  }

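  // Sizing example (hypothetical values): init_csum(Checksummer::CSUM_CRC32C,
  // 12, 0x10000) on a 64 KiB blob gives 16 csum chunks of 4 KiB each, so
  // csum_data holds 16 four-byte crc32c values and get_csum_count() == 16.
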
  /// calculate csum for the buffer at the given b_off
  void calc_csum(uint64_t b_off, const bufferlist& bl);

  /// verify csum: return -EOPNOTSUPP for an unsupported checksum type;
  /// return -1 and a valid (nonnegative) b_bad_off for a checksum error;
  /// return 0 if all is well.
  int verify_csum(uint64_t b_off, const bufferlist& bl, int* b_bad_off,
                  uint64_t *bad_csum) const;

  bool can_prune_tail() const {
    return
      extents.size() > 1 &&  // if it's all invalid it's not pruning.
      !extents.back().is_valid() &&
      !has_unused();
  }
  void prune_tail() {
    const auto &p = extents.back();
    logical_length -= p.length;
    extents.pop_back();
    if (has_csum()) {
      bufferptr t;
      t.swap(csum_data);
      csum_data = bufferptr(t.c_str(),
                            get_logical_length() / get_csum_chunk_size() *
                            get_csum_value_size());
    }
  }

  void add_tail(uint32_t new_len) {
    assert(is_mutable());
    assert(!has_unused());
    assert(new_len > logical_length);
    extents.emplace_back(
      bluestore_pextent_t(
        bluestore_pextent_t::INVALID_OFFSET,
        new_len - logical_length));
    logical_length = new_len;
    if (has_csum()) {
      bufferptr t;
      t.swap(csum_data);
      csum_data = buffer::create(
        get_csum_value_size() * logical_length / get_csum_chunk_size());
      csum_data.copy_in(0, t.length(), t.c_str());
      csum_data.zero(t.length(), csum_data.length() - t.length());
    }
  }

  uint32_t get_release_size(uint32_t min_alloc_size) const {
    if (is_compressed()) {
      return get_logical_length();
    }
    uint32_t res = get_csum_chunk_size();
    if (!has_csum() || res < min_alloc_size) {
      res = min_alloc_size;
    }
    return res;
  }

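  // E.g. with crc32c at csum_chunk_order == 14 (16 KiB chunks) and a 4 KiB
  // min_alloc_size, space can only be released in 16 KiB units; without
  // checksums the release granularity falls back to min_alloc_size, and a
  // compressed blob can only be released whole.
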
  void split(uint32_t blob_offset, bluestore_blob_t& rb);
  void allocated(uint32_t b_off, uint32_t length, const AllocExtentVector& allocs);
  void allocated_test(const bluestore_pextent_t& alloc); // intended for UT only

  /// Updates the blob's pextents container and returns unused pextents
  /// eligible for release.
  /// all     - indicates that the whole blob is to be released.
  /// logical - specifies the set of logical extents within the blob's
  ///           pextents to be released.
  /// Returns true if the blob has no more valid pextents.
  bool release_extents(
    bool all,
    const PExtentVector& logical,
    PExtentVector* r);
};
WRITE_CLASS_DENC_FEATURED(bluestore_blob_t)

ostream& operator<<(ostream& out, const bluestore_blob_t& o);

/// shared blob state
struct bluestore_shared_blob_t {
  uint64_t sbid;                        ///< shared blob id
  bluestore_extent_ref_map_t ref_map;   ///< shared blob extents

  bluestore_shared_blob_t(uint64_t _sbid) : sbid(_sbid) {}

  DENC(bluestore_shared_blob_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.sbid, p);
    denc(v.ref_map, p);
    DENC_FINISH(p);
  }

  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_shared_blob_t*>& ls);

  bool empty() const {
    return ref_map.empty();
  }
};
WRITE_CLASS_DENC(bluestore_shared_blob_t)

ostream& operator<<(ostream& out, const bluestore_shared_blob_t& o);

/// onode: per-object metadata
struct bluestore_onode_t {
  uint64_t nid = 0;   ///< numeric id (locally unique)
  uint64_t size = 0;  ///< object size
  map<mempool::bluestore_cache_other::string, bufferptr> attrs;  ///< attrs

  struct shard_info {
    uint32_t offset = 0;  ///< logical offset for start of shard
    uint32_t bytes = 0;   ///< encoded bytes
    DENC(shard_info, v, p) {
      denc_varint(v.offset, p);
      denc_varint(v.bytes, p);
    }
    void dump(Formatter *f) const;
  };
  vector<shard_info> extent_map_shards; ///< extent map shards (if any)

  uint32_t expected_object_size = 0;
  uint32_t expected_write_size = 0;
  uint32_t alloc_hint_flags = 0;

  uint8_t flags = 0;

  enum {
    FLAG_OMAP = 1,
  };

  string get_flags_string() const {
    string s;
    if (flags & FLAG_OMAP) {
      s = "omap";
    }
    return s;
  }

  bool has_flag(unsigned f) const {
    return flags & f;
  }
  void set_flag(unsigned f) {
    flags |= f;
  }
  void clear_flag(unsigned f) {
    flags &= ~f;
  }
  bool has_omap() const {
    return has_flag(FLAG_OMAP);
  }
  void set_omap_flag() {
    set_flag(FLAG_OMAP);
  }
  void clear_omap_flag() {
    clear_flag(FLAG_OMAP);
  }

  DENC(bluestore_onode_t, v, p) {
    DENC_START(1, 1, p);
    denc_varint(v.nid, p);
    denc_varint(v.size, p);
    denc(v.attrs, p);
    denc(v.extent_map_shards, p);
    denc_varint(v.expected_object_size, p);
    denc_varint(v.expected_write_size, p);
    denc_varint(v.alloc_hint_flags, p);
    denc(v.flags, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_onode_t*>& o);
};
WRITE_CLASS_DENC(bluestore_onode_t::shard_info)
WRITE_CLASS_DENC(bluestore_onode_t)

ostream& operator<<(ostream& out, const bluestore_onode_t::shard_info& si);

/// writeahead-logged op
struct bluestore_deferred_op_t {
  typedef enum {
    OP_WRITE = 1,
  } type_t;
  __u8 op = 0;

  PExtentVector extents;
  bufferlist data;

  DENC(bluestore_deferred_op_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.op, p);
    denc(v.extents, p);
    denc(v.data, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_deferred_op_t*>& o);
};
WRITE_CLASS_DENC(bluestore_deferred_op_t)

/// writeahead-logged transaction
struct bluestore_deferred_transaction_t {
  uint64_t seq = 0;
  list<bluestore_deferred_op_t> ops;
  interval_set<uint64_t> released;  ///< allocations to release after tx

  bluestore_deferred_transaction_t() : seq(0) {}

  DENC(bluestore_deferred_transaction_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.seq, p);
    denc(v.ops, p);
    denc(v.released, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_deferred_transaction_t*>& o);
};
WRITE_CLASS_DENC(bluestore_deferred_transaction_t)

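// Flow sketch (illustrative): a deferred write is first persisted in the KV
// store as one bluestore_deferred_transaction_t; each op carries its target
// pextents plus the data to write, and 'released' lists allocations that may
// only be reclaimed once the transaction has been applied and cleaned up.
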
struct bluestore_compression_header_t {
  uint8_t type = Compressor::COMP_ALG_NONE;
  uint32_t length = 0;

  bluestore_compression_header_t() {}
  bluestore_compression_header_t(uint8_t _type)
    : type(_type) {}

  DENC(bluestore_compression_header_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.type, p);
    denc(v.length, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_compression_header_t*>& o);
};
WRITE_CLASS_DENC(bluestore_compression_header_t)

#endif