1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 #include "librbd/operation/ResizeRequest.h"
5 #include "librbd/ExclusiveLock.h"
6 #include "librbd/ImageCtx.h"
7 #include "librbd/internal.h"
8 #include "librbd/ObjectMap.h"
9 #include "librbd/Utils.h"
10 #include "librbd/io/ImageRequestWQ.h"
11 #include "librbd/operation/TrimRequest.h"
12 #include "common/dout.h"
13 #include "common/errno.h"
15 #define dout_subsys ceph_subsys_rbd
17 #define dout_prefix *_dout << "librbd::ResizeRequest: "
22 using util::create_async_context_callback;
23 using util::create_context_callback;
24 using util::create_rados_callback;
// Constructor: captures the target size and resize options. The request is
// started later via send(); concurrent resizes are serialized through the
// image's resize_reqs queue (see send()/destructor below).
// NOTE(review): the member-initializer list is truncated in this view
// (trailing comma on the last visible line) -- remainder not shown.
27 ResizeRequest<I>::ResizeRequest(I &image_ctx, Context *on_finish,
28 uint64_t new_size, bool allow_shrink, ProgressContext &prog_ctx,
29 uint64_t journal_op_tid, bool disable_journal)
30 : Request<I>(image_ctx, on_finish, journal_op_tid),
31 m_original_size(0), m_new_size(new_size), m_allow_shrink(allow_shrink),
32 m_prog_ctx(prog_ctx), m_new_parent_overlap(0), m_disable_journal(disable_journal),
// Destructor: dequeues this request from the per-image resize queue and, if
// another resize is queued behind it, picks it up so it can be started.
38 ResizeRequest<I>::~ResizeRequest() {
39 I &image_ctx = this->m_image_ctx;
40 ResizeRequest *next_req = NULL;
// Mutate the queue under snap_lock; we must still be on the list here.
42 RWLock::WLocker snap_locker(image_ctx.snap_lock);
43 assert(m_xlist_item.remove_myself());
44 if (!image_ctx.resize_reqs.empty()) {
45 next_req = image_ctx.resize_reqs.front();
// Kick the next queued request (owner_lock held for the start).
// NOTE(review): the actual start call on next_req is not visible in this
// view -- body truncated.
49 if (next_req != NULL) {
50 RWLock::RLocker owner_locker(image_ctx.owner_lock);
// Entry point: enqueue this request on image_ctx.resize_reqs. Only the
// request at the front of the queue proceeds; later requests wait to be
// restarted when the running request is destroyed (see destructor).
56 void ResizeRequest<I>::send() {
57 I &image_ctx = this->m_image_ctx;
58 assert(image_ctx.owner_lock.is_locked());
61 RWLock::WLocker snap_locker(image_ctx.snap_lock);
62 if (!m_xlist_item.is_on_list()) {
63 image_ctx.resize_reqs.push_back(&m_xlist_item);
64 if (image_ctx.resize_reqs.front() != this) {
// NOTE(review): the early return for queued (non-front) requests is not
// visible in this view.
69 assert(image_ctx.resize_reqs.front() == this);
// Snapshot the current size and parent overlap under snap_lock; these
// drive the grow/shrink decisions in the state machine.
70 m_original_size = image_ctx.size;
71 compute_parent_overlap();
// Starts the resize state machine once this request reaches the front of
// the queue. Completes with -ERESTART if the request was canceled before
// it could run.
78 void ResizeRequest<I>::send_op() {
79 I &image_ctx = this->m_image_ctx;
80 assert(image_ctx.owner_lock.is_locked());
82 if (this->is_canceled()) {
83 this->async_complete(-ERESTART);
// First state: quiesce writes before touching image metadata.
85 send_pre_block_writes();
// Block new writes on the image's IO work queue; the completion callback
// continues the state machine in handle_pre_block_writes().
90 void ResizeRequest<I>::send_pre_block_writes() {
91 I &image_ctx = this->m_image_ctx;
92 CephContext *cct = image_ctx.cct;
93 ldout(cct, 5) << this << " " << __func__ << dendl;
95 image_ctx.io_work_queue->block_writes(create_context_callback<
96 ResizeRequest<I>, &ResizeRequest<I>::handle_pre_block_writes>(this));
// Completion of the pre-update write block; on success, move on to the
// journal op-event append.
100 Context *ResizeRequest<I>::handle_pre_block_writes(int *result) {
101 I &image_ctx = this->m_image_ctx;
102 CephContext *cct = image_ctx.cct;
103 ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
// On failure, release the write block before finishing so the image does
// not stay wedged. NOTE(review): the enclosing error check
// (if (*result < 0)) is not visible in this view.
106 lderr(cct) << "failed to block writes: " << cpp_strerror(*result) << dendl;
107 image_ctx.io_work_queue->unblock_writes();
108 return this->create_context_finisher(*result);
111 return send_append_op_event();
114 template <typename I>
// Validate the shrink request and (optionally) record the resize in the
// journal before mutating any state.
115 Context *ResizeRequest<I>::send_append_op_event() {
116 I &image_ctx = this->m_image_ctx;
117 CephContext *cct = image_ctx.cct;
// Refuse to shrink unless the caller explicitly allowed it.
119 if (m_new_size < m_original_size && !m_allow_shrink) {
120 ldout(cct, 1) << " shrinking the image is not permitted" << dendl;
121 this->async_complete(-EINVAL);
// Skip the journal append when journaling is disabled for this request or
// when append_op_event() returns false (presumably journaling inactive --
// TODO confirm); otherwise wait for handle_append_op_event.
125 if (m_disable_journal || !this->template append_op_event<
126 ResizeRequest<I>, &ResizeRequest<I>::handle_append_op_event>(this)) {
127 return send_grow_object_map();
130 ldout(cct, 5) << this << " " << __func__ << dendl;
134 template <typename I>
// Completion of the journal op-event append; proceeds to the object-map
// grow step on success.
135 Context *ResizeRequest<I>::handle_append_op_event(int *result) {
136 I &image_ctx = this->m_image_ctx;
137 CephContext *cct = image_ctx.cct;
138 ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
// On journal failure, release the write block before completing.
// NOTE(review): the enclosing error check is not visible in this view.
141 lderr(cct) << "failed to commit journal entry: " << cpp_strerror(*result)
143 image_ctx.io_work_queue->unblock_writes();
144 return this->create_context_finisher(*result);
147 return send_grow_object_map();
150 template <typename I>
// Shrink path: delete/truncate the backing objects between the new size
// and the original size via a TrimRequest, reporting progress through
// m_prog_ctx.
151 void ResizeRequest<I>::send_trim_image() {
152 I &image_ctx = this->m_image_ctx;
153 CephContext *cct = image_ctx.cct;
154 ldout(cct, 5) << this << " " << __func__ << dendl;
156 RWLock::RLocker owner_locker(image_ctx.owner_lock);
157 TrimRequest<I> *req = TrimRequest<I>::create(
158 image_ctx, create_context_callback<
159 ResizeRequest<I>, &ResizeRequest<I>::handle_trim_image>(this),
160 m_original_size, m_new_size, m_prog_ctx);
// NOTE(review): the req->send() call is not visible in this view.
164 template <typename I>
// Completion of the trim; on success continue to the post-update write
// block ahead of the header update.
165 Context *ResizeRequest<I>::handle_trim_image(int *result) {
166 I &image_ctx = this->m_image_ctx;
167 CephContext *cct = image_ctx.cct;
168 ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
// -ERESTART means the trim was interrupted; propagate it unchanged so the
// operation can be retried.
170 if (*result == -ERESTART) {
171 ldout(cct, 5) << "resize operation interrupted" << dendl;
172 return this->create_context_finisher(*result);
173 } else if (*result < 0) {
174 lderr(cct) << "failed to trim image: " << cpp_strerror(*result) << dendl;
175 return this->create_context_finisher(*result);
178 send_post_block_writes();
182 template <typename I>
// Flush dirty data from the in-memory object cache before trimming;
// skipped entirely when the image has no object cacher configured.
183 void ResizeRequest<I>::send_flush_cache() {
184 I &image_ctx = this->m_image_ctx;
185 if (image_ctx.object_cacher == nullptr) {
// NOTE(review): the no-cache fast path (the skip-ahead call) is not
// visible in this view.
190 CephContext *cct = image_ctx.cct;
191 ldout(cct, 5) << this << " " << __func__ << dendl;
193 RWLock::RLocker owner_locker(image_ctx.owner_lock);
194 image_ctx.flush_cache(create_async_context_callback(
195 image_ctx, create_context_callback<
196 ResizeRequest<I>, &ResizeRequest<I>::handle_flush_cache>(this)));
199 template <typename I>
// Completion of the cache flush; on success continue with cache
// invalidation.
200 Context *ResizeRequest<I>::handle_flush_cache(int *result) {
201 I &image_ctx = this->m_image_ctx;
202 CephContext *cct = image_ctx.cct;
203 ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
// NOTE(review): the enclosing error check is not visible in this view.
206 lderr(cct) << "failed to flush cache: " << cpp_strerror(*result) << dendl;
207 return this->create_context_finisher(*result);
210 send_invalidate_cache();
214 template <typename I>
// Drop cache entries for the objects about to be trimmed (async, via the
// image's context callback machinery).
215 void ResizeRequest<I>::send_invalidate_cache() {
216 I &image_ctx = this->m_image_ctx;
217 CephContext *cct = image_ctx.cct;
218 ldout(cct, 5) << this << " " << __func__ << dendl;
220 // need to invalidate since we're deleting objects, and
221 // ObjectCacher doesn't track non-existent objects
222 RWLock::RLocker owner_locker(image_ctx.owner_lock);
223 image_ctx.invalidate_cache(false, create_async_context_callback(
224 image_ctx, create_context_callback<
225 ResizeRequest<I>, &ResizeRequest<I>::handle_invalidate_cache>(this)));
228 template <typename I>
// Completion of the cache invalidation; -EBUSY is tolerated (see comment
// below), any other error aborts the resize.
229 Context *ResizeRequest<I>::handle_invalidate_cache(int *result) {
230 I &image_ctx = this->m_image_ctx;
231 CephContext *cct = image_ctx.cct;
232 ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
234 // ignore busy error -- writeback was successfully flushed so we might be
235 // wasting some cache space for trimmed objects, but they will get purged
236 // eventually. Most likely cause of the issue was an in-flight cache read
237 if (*result < 0 && *result != -EBUSY) {
238 lderr(cct) << "failed to invalidate cache: " << cpp_strerror(*result)
240 return this->create_context_finisher(*result);
247 template <typename I>
// After pre-block/journal prep: resume writes and, when the image is
// growing, extend the object map before updating the on-disk header.
248 Context *ResizeRequest<I>::send_grow_object_map() {
249 I &image_ctx = this->m_image_ctx;
252 RWLock::WLocker snap_locker(image_ctx.snap_lock);
// Flag consumed elsewhere for shrink visibility -- consumer not visible
// in this view; TODO confirm semantics.
253 m_shrink_size_visible = true;
255 image_ctx.io_work_queue->unblock_writes();
// No size change: done. Shrink: handled on a different path.
// NOTE(review): the shrink-branch body following the else-if is not
// visible in this view.
257 if (m_original_size == m_new_size) {
258 return this->create_context_finisher(0);
259 } else if (m_new_size < m_original_size) {
// Grow path: if there is no object map, skip straight to the post-update
// write block.
264 image_ctx.owner_lock.get_read();
265 image_ctx.snap_lock.get_read();
266 if (image_ctx.object_map == nullptr) {
267 image_ctx.snap_lock.put_read();
268 image_ctx.owner_lock.put_read();
270 send_post_block_writes();
274 CephContext *cct = image_ctx.cct;
275 ldout(cct, 5) << this << " " << __func__ << dendl;
277 // should have been canceled prior to releasing lock
278 assert(image_ctx.exclusive_lock == nullptr ||
279 image_ctx.exclusive_lock->is_lock_owner());
// Grow the object map, marking the newly-covered objects nonexistent.
281 image_ctx.object_map->aio_resize(
282 m_new_size, OBJECT_NONEXISTENT, create_context_callback<
283 ResizeRequest<I>, &ResizeRequest<I>::handle_grow_object_map>(this));
284 image_ctx.snap_lock.put_read();
285 image_ctx.owner_lock.put_read();
289 template <typename I>
// Completion of the object-map grow; continues with the post-update write
// block.
290 Context *ResizeRequest<I>::handle_grow_object_map(int *result) {
291 I &image_ctx = this->m_image_ctx;
292 CephContext *cct = image_ctx.cct;
293 ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
// Object-map resize is expected to always succeed.
295 assert(*result == 0);
296 send_post_block_writes();
300 template <typename I>
// Shrink-path tail: after the header records the new size, shrink the
// object map. Skipped (straight to the size/overlap update) when there is
// no object map or the image actually grew.
301 Context *ResizeRequest<I>::send_shrink_object_map() {
302 I &image_ctx = this->m_image_ctx;
304 image_ctx.owner_lock.get_read();
305 image_ctx.snap_lock.get_read();
306 if (image_ctx.object_map == nullptr || m_new_size > m_original_size) {
307 image_ctx.snap_lock.put_read();
308 image_ctx.owner_lock.put_read();
310 update_size_and_overlap();
311 return this->create_context_finisher(0);
314 CephContext *cct = image_ctx.cct;
315 ldout(cct, 5) << this << " " << __func__ << " "
316 << "original_size=" << m_original_size << ", "
317 << "new_size=" << m_new_size << dendl;
319 // should have been canceled prior to releasing lock
320 assert(image_ctx.exclusive_lock == nullptr ||
321 image_ctx.exclusive_lock->is_lock_owner());
// Truncate the object map down to the new size.
323 image_ctx.object_map->aio_resize(
324 m_new_size, OBJECT_NONEXISTENT, create_context_callback<
325 ResizeRequest<I>, &ResizeRequest<I>::handle_shrink_object_map>(this));
326 image_ctx.snap_lock.put_read();
327 image_ctx.owner_lock.put_read();
331 template <typename I>
// Completion of the object-map shrink; publishes the final size/overlap
// and finishes the request.
332 Context *ResizeRequest<I>::handle_shrink_object_map(int *result) {
333 I &image_ctx = this->m_image_ctx;
334 CephContext *cct = image_ctx.cct;
335 ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
// Publish the new size/parent overlap and unblock writes.
337 update_size_and_overlap();
// Object-map resize is expected to always succeed.
338 assert(*result == 0);
339 return this->create_context_finisher(0);
342 template <typename I>
// Block writes again prior to the on-disk header update (see the error
// message in handle_post_block_writes).
343 void ResizeRequest<I>::send_post_block_writes() {
344 I &image_ctx = this->m_image_ctx;
345 CephContext *cct = image_ctx.cct;
346 ldout(cct, 5) << this << " " << __func__ << dendl;
348 RWLock::RLocker owner_locker(image_ctx.owner_lock);
349 image_ctx.io_work_queue->block_writes(create_context_callback<
350 ResizeRequest<I>, &ResizeRequest<I>::handle_post_block_writes>(this));
353 template <typename I>
// Completion of the post-update write block; on success, update the image
// header on disk.
354 Context *ResizeRequest<I>::handle_post_block_writes(int *result) {
355 I &image_ctx = this->m_image_ctx;
356 CephContext *cct = image_ctx.cct;
357 ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
// On failure, undo the block before finishing. NOTE(review): the
// enclosing error check is not visible in this view.
360 image_ctx.io_work_queue->unblock_writes();
361 lderr(cct) << "failed to block writes prior to header update: "
362 << cpp_strerror(*result) << dendl;
363 return this->create_context_finisher(*result);
366 send_update_header();
370 template <typename I>
// Persist the new size into the image header object: a raw write into the
// size field for format 1 images, cls_client::set_size for format 2.
371 void ResizeRequest<I>::send_update_header() {
372 I &image_ctx = this->m_image_ctx;
373 CephContext *cct = image_ctx.cct;
374 ldout(cct, 5) << this << " " << __func__ << " "
375 << "original_size=" << m_original_size << ", "
376 << "new_size=" << m_new_size << dendl;;
378 // should have been canceled prior to releasing lock
379 RWLock::RLocker owner_locker(image_ctx.owner_lock);
380 assert(image_ctx.exclusive_lock == nullptr ||
381 image_ctx.exclusive_lock->is_lock_owner());
383 librados::ObjectWriteOperation op;
384 if (image_ctx.old_format) {
385 // rewrite only the size field of the header
386 // NOTE: format 1 image headers are not stored in fixed endian format
// NOTE(review): the declaration of the bufferlist 'bl' is not visible in
// this view.
388 bl.append(reinterpret_cast<const char*>(&m_new_size), sizeof(m_new_size));
389 op.write(offsetof(rbd_obj_header_ondisk, image_size), bl);
391 cls_client::set_size(&op, m_new_size);
// Submit the header update asynchronously; completion continues in
// handle_update_header.
394 librados::AioCompletion *rados_completion = create_rados_callback<
395 ResizeRequest<I>, &ResizeRequest<I>::handle_update_header>(this);
396 int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid,
397 rados_completion, &op);
// NOTE(review): the check of 'r' is not visible in this view; release
// drops our local ref on the completion.
399 rados_completion->release();
402 template <typename I>
// Completion of the on-disk header update; on success continue to the
// object-map shrink step.
403 Context *ResizeRequest<I>::handle_update_header(int *result) {
404 I &image_ctx = this->m_image_ctx;
405 CephContext *cct = image_ctx.cct;
406 ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
// On failure, release the write block before finishing. NOTE(review):
// the enclosing error check is not visible in this view.
409 lderr(cct) << "failed to update image header: " << cpp_strerror(*result)
411 image_ctx.io_work_queue->unblock_writes();
412 return this->create_context_finisher(*result);
415 return send_shrink_object_map();
418 template <typename I>
// Compute the post-resize parent overlap: zero when the image has no
// parent, otherwise the current overlap clamped to the new image size.
419 void ResizeRequest<I>::compute_parent_overlap() {
420 I &image_ctx = this->m_image_ctx;
421 RWLock::RLocker l2(image_ctx.parent_lock);
422 if (image_ctx.parent == NULL) {
423 m_new_parent_overlap = 0;
425 m_new_parent_overlap = MIN(m_new_size, image_ctx.parent_md.overlap);
429 template <typename I>
// Publish the new in-memory image size (and the reduced parent overlap on
// shrink) under the appropriate locks, then resume writes that were
// blocked by the POST_BLOCK_WRITES state.
430 void ResizeRequest<I>::update_size_and_overlap() {
431 I &image_ctx = this->m_image_ctx;
433 RWLock::WLocker snap_locker(image_ctx.snap_lock);
434 image_ctx.size = m_new_size;
436 RWLock::WLocker parent_locker(image_ctx.parent_lock);
// Overlap only ever decreases here (shrink); grow leaves it untouched.
437 if (image_ctx.parent != NULL && m_new_size < m_original_size) {
438 image_ctx.parent_md.overlap = m_new_parent_overlap;
442 // blocked by POST_BLOCK_WRITES state
443 image_ctx.io_work_queue->unblock_writes();
446 } // namespace operation
447 } // namespace librbd
449 template class librbd::operation::ResizeRequest<librbd::ImageCtx>;