/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);
	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);
	return -EINVAL;
}
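/*
 * Usage sketch (illustrative, not from the original source): these
 * saturating helpers guard reference counts such as parent_ref below,
 * turning overflow or underflow into a warning instead of a wrap:
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0)
 *		... parent data may be used ...
 *	else
 *		rbd_warn(rbd_dev, "parent reference overflow");
 */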
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
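/*
 * For illustration (not in the original source; assumes the usual
 * NAME_MAX of 255 and 4 KiB pages): "snap_" is 5 characters, so
 * RBD_MAX_SNAP_NAME_LEN works out to 255 - 5 = 250.  A snapshot
 * context holds one 8-byte id per snapshot, so 510 of them occupy
 * 4080 bytes, which together with the snapc header still fits in a
 * single 4 KiB page.
 */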
#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by the OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
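/*
 * Worked example (not from the original source): each byte of an int
 * contributes at most log10(256) < 2.5 decimal digits, and the "+ 1"
 * leaves room for a leading '-'.  With 4-byte ints this gives
 * (5 * 4) / 2 + 1 = 11 characters, exactly enough for "-2147483648".
 */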
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * An instance of the client.  Multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	struct rbd_obj_request	*obj_request;	/* STAT op */
	struct rbd_img_request	*img_request;
	u64			img_offset;
	/* links for img_request->obj_requests list */
	struct list_head	links;
	u32			which;		/* position in image request list */

	enum obj_request_type	type;
	struct bio		*bio_list;
	struct page		**pages;
	u32			page_count;
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;

	u64			snap_id;	/* for reads */
	struct ceph_snap_context *snapc;	/* for writes */

	struct request		*rq;		/* block request */
	struct rbd_obj_request	*obj_request;	/* obj req initiator */

	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
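/*
 * A note on the _safe variant (an inference from the assertions in
 * rbd_img_obj_request_del() below, not a comment from the original
 * source): it walks the list in reverse so that teardown always
 * removes the highest-numbered request first, keeping that request's
 * "which" value equal to the just-decremented obj_request_count.
 */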
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;

	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires version 0.75 or
 * later of the userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
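/*
 * Worked example (not from the original source): with
 * RBD_SINGLE_MAJOR_PART_SHIFT of 4, each device owns a block of
 * 1 << 4 = 16 minors, the whole device plus up to 15 partitions.
 * dev_id 2 maps to minor 32, and minors 32..47 all map back to
 * dev_id 2.
 */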
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
# define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots are not writable */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);
	return rbdc;

out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);
	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}

	return 0;
}
static char *obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to unlink
 * the client from the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node.  If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
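/*
 * Worked example (not from the original source): with snaps[] holding
 * { 12, 7, 3 } (descending, as the osd keeps it), a bsearch() for id 7
 * using snapid_compare_reverse() lands on &snaps[1], so the function
 * returns index 1; searching for id 5 finds nothing and returns
 * BAD_SNAP_INDEX.
 */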
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
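/*
 * Worked example (not from the original source): with obj_order 22 the
 * segment size is 4 MiB.  An image offset of 5 MiB falls in segment 1
 * (5 MiB >> 22), at byte 1 MiB within that segment; a 4 MiB request
 * starting there is clamped to the 3 MiB remaining in the segment.
 */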
/*
 * Returns the size of an object in the image.
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * Zeros a bio chain, starting at a specific offset.
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * Similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
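/*
 * Worked example (not from the original source): given a source chain
 * whose first bio holds 8 KiB, calling bio_chain_clone_range() with
 * *offset == 6 KiB and len == 4 KiB clones the last 2 KiB of the first
 * bio and the first 2 KiB of the second.  On return, *bio_src points
 * at the second bio and *offset is 2 KiB, the first un-cloned byte.
 */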
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
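/*
 * Worked example (not from the original source): with 4 MiB objects
 * and a parent overlap of 6 MiB, round_up() yields 8 MiB.  The object
 * covering bytes 4..8 MiB still partially overlaps the parent, so any
 * request with img_offset below 8 MiB must consider the parent.
 */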
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}

/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 */
static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	int ret;

	dout("%s %p\n", __func__, obj_request);

	ret = wait_for_completion_interruptible(&obj_request->completion);
	if (ret < 0) {
		dout("%s %p interrupted\n", __func__, obj_request);
		rbd_obj_request_end(obj_request);
		return ret;
	}

	dout("%s %p done\n", __func__, obj_request);
	return 0;
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better offhand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}
static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_NOIO);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
2249 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2251 struct rbd_img_request *img_request;
2252 unsigned int xferred;
2256 rbd_assert(obj_request_img_data_test(obj_request));
2257 img_request = obj_request->img_request;
2259 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2260 xferred = (unsigned int)obj_request->xferred;
2261 result = obj_request->result;
2263 struct rbd_device *rbd_dev = img_request->rbd_dev;
2264 enum obj_operation_type op_type;
2266 if (img_request_discard_test(img_request))
2267 op_type = OBJ_OP_DISCARD;
2268 else if (img_request_write_test(img_request))
2269 op_type = OBJ_OP_WRITE;
2270 else
2271 op_type = OBJ_OP_READ;
2273 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
2274 obj_op_name(op_type), obj_request->length,
2275 obj_request->img_offset, obj_request->offset);
2276 rbd_warn(rbd_dev, " result %d xferred %x",
2277 result, xferred);
2278 if (!img_request->result)
2279 img_request->result = result;
2281 * Need to end I/O on the entire obj_request worth of
2282 * bytes in case of error.
2284 xferred = obj_request->length;
2287 /* Image object requests don't own their page array */
2289 if (obj_request->type == OBJ_REQUEST_PAGES) {
2290 obj_request->pages = NULL;
2291 obj_request->page_count = 0;
2294 if (img_request_child_test(img_request)) {
2295 rbd_assert(img_request->obj_request != NULL);
2296 more = obj_request->which < img_request->obj_request_count - 1;
2297 } else {
2298 rbd_assert(img_request->rq != NULL);
2300 more = blk_update_request(img_request->rq, result, xferred);
2301 if (!more)
2302 __blk_mq_end_request(img_request->rq, result);
2305 return more;
2308 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2310 struct rbd_img_request *img_request;
2311 u32 which = obj_request->which;
2312 bool more = true;
2314 rbd_assert(obj_request_img_data_test(obj_request));
2315 img_request = obj_request->img_request;
2317 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2318 rbd_assert(img_request != NULL);
2319 rbd_assert(img_request->obj_request_count > 0);
2320 rbd_assert(which != BAD_WHICH);
2321 rbd_assert(which < img_request->obj_request_count);
2323 spin_lock_irq(&img_request->completion_lock);
2324 if (which != img_request->next_completion)
2325 goto out;
2327 for_each_obj_request_from(img_request, obj_request) {
2329 rbd_assert(which < img_request->obj_request_count);
2331 if (!obj_request_done_test(obj_request))
2332 break;
2333 more = rbd_img_obj_end_request(obj_request);
2334 which++;
2337 rbd_assert(more ^ (which == img_request->obj_request_count));
2338 img_request->next_completion = which;
2339 out:
2340 spin_unlock_irq(&img_request->completion_lock);
2341 rbd_img_request_put(img_request);
2343 if (!more)
2344 rbd_img_request_complete(img_request);
2348 * Add individual osd ops to the given ceph_osd_request and prepare
2349 * them for submission. num_ops is the current number of
2350 osd operations already added to the object request.
2352 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2353 struct ceph_osd_request *osd_request,
2354 enum obj_operation_type op_type,
2355 unsigned int num_ops)
2357 struct rbd_img_request *img_request = obj_request->img_request;
2358 struct rbd_device *rbd_dev = img_request->rbd_dev;
2359 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2360 u64 offset = obj_request->offset;
2361 u64 length = obj_request->length;
2365 if (op_type == OBJ_OP_DISCARD) {
2366 if (!offset && length == object_size &&
2367 (!img_request_layered_test(img_request) ||
2368 !obj_request_overlaps_parent(obj_request))) {
2369 opcode = CEPH_OSD_OP_DELETE;
2370 } else if (offset + length == object_size) {
2371 opcode = CEPH_OSD_OP_TRUNCATE;
2373 down_read(&rbd_dev->header_rwsem);
2374 img_end = rbd_dev->header.image_size;
2375 up_read(&rbd_dev->header_rwsem);
2377 if (obj_request->img_offset + length == img_end)
2378 opcode = CEPH_OSD_OP_TRUNCATE;
2379 else
2380 opcode = CEPH_OSD_OP_ZERO;
2382 } else if (op_type == OBJ_OP_WRITE) {
2383 opcode = CEPH_OSD_OP_WRITE;
2384 osd_req_op_alloc_hint_init(osd_request, num_ops,
2385 object_size, object_size);
2386 num_ops++;
2388 opcode = CEPH_OSD_OP_READ;
2391 if (opcode == CEPH_OSD_OP_DELETE)
2392 osd_req_op_init(osd_request, num_ops, opcode);
2393 else
2394 osd_req_op_extent_init(osd_request, num_ops, opcode,
2395 offset, length, 0, 0);
2397 if (obj_request->type == OBJ_REQUEST_BIO)
2398 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2399 obj_request->bio_list, length);
2400 else if (obj_request->type == OBJ_REQUEST_PAGES)
2401 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2402 obj_request->pages, length,
2403 offset & ~PAGE_MASK, false, false);
2405 /* Discards are also writes */
2406 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2407 rbd_osd_req_format_write(obj_request);
2409 rbd_osd_req_format_read(obj_request);
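Put differently, assuming the op layout built above: a plain object write ends up as the op pair [SETALLOCHINT, WRITE], a copyup assembled by rbd_img_obj_parent_read_full_callback() below becomes [CALL rbd.copyup, SETALLOCHINT, WRITE], and a whole-object discard with no parent data underneath collapses to a single [DELETE].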
2413 * Split up an image request into one or more object requests, each
2414 * to a different object. The "type" parameter indicates whether
2415 * "data_desc" is the pointer to the head of a list of bio
2416 * structures, or the base of a page array. In either case this
2417 * function assumes data_desc describes memory sufficient to hold
2418 * all data described by the image request.
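A worked example, assuming the default 4 MiB (order 22) objects: a 6 MiB request at image offset 3 MiB splits into three object requests of 1 MiB (at offset 3 MiB within its object), 4 MiB (a full object), and 1 MiB; rbd_segment_name(), rbd_segment_offset() and rbd_segment_length() supply the per-object values inside the loop below.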
2420 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2421 enum obj_request_type type,
2424 struct rbd_device *rbd_dev = img_request->rbd_dev;
2425 struct rbd_obj_request *obj_request = NULL;
2426 struct rbd_obj_request *next_obj_request;
2427 struct bio *bio_list = NULL;
2428 unsigned int bio_offset = 0;
2429 struct page **pages = NULL;
2430 enum obj_operation_type op_type;
2431 u64 img_offset;
2432 u64 resid;
2434 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2435 (int)type, data_desc);
2437 img_offset = img_request->offset;
2438 resid = img_request->length;
2439 rbd_assert(resid > 0);
2440 op_type = rbd_img_request_op_type(img_request);
2442 if (type == OBJ_REQUEST_BIO) {
2443 bio_list = data_desc;
2444 rbd_assert(img_offset ==
2445 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2446 } else if (type == OBJ_REQUEST_PAGES) {
2447 pages = data_desc;
2448 }
2450 while (resid) {
2451 struct ceph_osd_request *osd_req;
2452 const char *object_name;
2453 u64 offset;
2454 u64 length;
2456 object_name = rbd_segment_name(rbd_dev, img_offset);
2457 if (!object_name)
2458 goto out_unwind;
2459 offset = rbd_segment_offset(rbd_dev, img_offset);
2460 length = rbd_segment_length(rbd_dev, img_offset, resid);
2461 obj_request = rbd_obj_request_create(object_name,
2462 offset, length, type);
2463 /* object request has its own copy of the object name */
2464 rbd_segment_name_free(object_name);
2465 if (!obj_request)
2466 goto out_unwind;
2469 * set obj_request->img_request before creating the
2470 * osd_request so that it gets the right snapc
2472 rbd_img_obj_request_add(img_request, obj_request);
2474 if (type == OBJ_REQUEST_BIO) {
2475 unsigned int clone_size;
2477 rbd_assert(length <= (u64)UINT_MAX);
2478 clone_size = (unsigned int)length;
2479 obj_request->bio_list =
2480 bio_chain_clone_range(&bio_list,
2481 &bio_offset,
2482 clone_size,
2483 GFP_NOIO);
2484 if (!obj_request->bio_list)
2485 goto out_unwind;
2486 } else if (type == OBJ_REQUEST_PAGES) {
2487 unsigned int page_count;
2489 obj_request->pages = pages;
2490 page_count = (u32)calc_pages_for(offset, length);
2491 obj_request->page_count = page_count;
2492 if ((offset + length) & ~PAGE_MASK)
2493 page_count--; /* more on last page */
2494 pages += page_count;
2497 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2498 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2499 obj_request);
2500 if (!osd_req)
2501 goto out_unwind;
2503 obj_request->osd_req = osd_req;
2504 obj_request->callback = rbd_img_obj_callback;
2505 obj_request->img_offset = img_offset;
2507 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2509 rbd_img_request_get(img_request);
2511 img_offset += length;
2512 resid -= length;
2514 return 0;
2516 out_unwind:
2518 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2519 rbd_img_obj_request_del(img_request, obj_request);
2521 return -ENOMEM;
2525 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2527 struct rbd_img_request *img_request;
2528 struct rbd_device *rbd_dev;
2529 struct page **pages;
2532 dout("%s: obj %p\n", __func__, obj_request);
2534 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2535 obj_request->type == OBJ_REQUEST_NODATA);
2536 rbd_assert(obj_request_img_data_test(obj_request));
2537 img_request = obj_request->img_request;
2538 rbd_assert(img_request);
2540 rbd_dev = img_request->rbd_dev;
2541 rbd_assert(rbd_dev);
2543 pages = obj_request->copyup_pages;
2544 rbd_assert(pages != NULL);
2545 obj_request->copyup_pages = NULL;
2546 page_count = obj_request->copyup_page_count;
2547 rbd_assert(page_count);
2548 obj_request->copyup_page_count = 0;
2549 ceph_release_page_vector(pages, page_count);
2552 * We want the transfer count to reflect the size of the
2553 * original write request. There is no such thing as a
2554 * successful short write, so if the request was successful
2555 * we can just set it to the originally-requested length.
2557 if (!obj_request->result)
2558 obj_request->xferred = obj_request->length;
2560 obj_request_done_set(obj_request);
2564 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2566 struct rbd_obj_request *orig_request;
2567 struct ceph_osd_request *osd_req;
2568 struct ceph_osd_client *osdc;
2569 struct rbd_device *rbd_dev;
2570 struct page **pages;
2571 enum obj_operation_type op_type;
2572 u32 page_count;
2573 int img_result;
2574 u64 parent_length;
2576 rbd_assert(img_request_child_test(img_request));
2578 /* First get what we need from the image request */
2580 pages = img_request->copyup_pages;
2581 rbd_assert(pages != NULL);
2582 img_request->copyup_pages = NULL;
2583 page_count = img_request->copyup_page_count;
2584 rbd_assert(page_count);
2585 img_request->copyup_page_count = 0;
2587 orig_request = img_request->obj_request;
2588 rbd_assert(orig_request != NULL);
2589 rbd_assert(obj_request_type_valid(orig_request->type));
2590 img_result = img_request->result;
2591 parent_length = img_request->length;
2592 rbd_assert(parent_length == img_request->xferred);
2593 rbd_img_request_put(img_request);
2595 rbd_assert(orig_request->img_request);
2596 rbd_dev = orig_request->img_request->rbd_dev;
2597 rbd_assert(rbd_dev);
2600 * If the overlap has become 0 (most likely because the
2601 * image has been flattened) we need to free the pages
2602 * and re-submit the original write request.
2604 if (!rbd_dev->parent_overlap) {
2605 struct ceph_osd_client *osdc;
2607 ceph_release_page_vector(pages, page_count);
2608 osdc = &rbd_dev->rbd_client->client->osdc;
2609 img_result = rbd_obj_request_submit(osdc, orig_request);
2610 if (!img_result)
2611 return;
2612 }
2614 if (img_result)
2615 goto out_err;
2618 The original osd request is of no use to us any more.
2619 * We need a new one that can hold the three ops in a copyup
2620 * request. Allocate the new copyup osd request for the
2621 * original request, and release the old one.
2623 img_result = -ENOMEM;
2624 osd_req = rbd_osd_req_create_copyup(orig_request);
2625 if (!osd_req)
2626 goto out_err;
2627 rbd_osd_req_destroy(orig_request->osd_req);
2628 orig_request->osd_req = osd_req;
2629 orig_request->copyup_pages = pages;
2630 orig_request->copyup_page_count = page_count;
2632 /* Initialize the copyup op */
2634 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2635 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2636 false, false);
2638 /* Add the other op(s) */
2640 op_type = rbd_img_request_op_type(orig_request->img_request);
2641 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2643 /* All set, send it off. */
2645 osdc = &rbd_dev->rbd_client->client->osdc;
2646 img_result = rbd_obj_request_submit(osdc, orig_request);
2647 if (!img_result)
2648 return;
2650 /* Record the error code and complete the request */
2651 out_err:
2652 orig_request->result = img_result;
2653 orig_request->xferred = 0;
2654 obj_request_done_set(orig_request);
2655 rbd_obj_request_complete(orig_request);
2659 * Read from the parent image the range of data that covers the
2660 * entire target of the given object request. This is used for
2661 * satisfying a layered image write request when the target of an
2662 * object request from the image request does not exist.
2664 * A page array big enough to hold the returned data is allocated
2665 * and supplied to rbd_img_request_fill() as the "data descriptor."
2666 * When the read completes, this page array will be transferred to
2667 * the original object request for the copyup operation.
2669 * If an error occurs, record it as the result of the original
2670 * object request and mark it done so it gets completed.
2672 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2674 struct rbd_img_request *img_request = NULL;
2675 struct rbd_img_request *parent_request = NULL;
2676 struct rbd_device *rbd_dev;
2679 struct page **pages = NULL;
2683 rbd_assert(obj_request_img_data_test(obj_request));
2684 rbd_assert(obj_request_type_valid(obj_request->type));
2686 img_request = obj_request->img_request;
2687 rbd_assert(img_request != NULL);
2688 rbd_dev = img_request->rbd_dev;
2689 rbd_assert(rbd_dev->parent != NULL);
2692 * Determine the byte range covered by the object in the
2693 * child image to which the original request was to be sent.
2695 img_offset = obj_request->img_offset - obj_request->offset;
2696 length = (u64)1 << rbd_dev->header.obj_order;
2699 * There is no defined parent data beyond the parent
2700 overlap, so limit what we read at that boundary if
2701 necessary.
2703 if (img_offset + length > rbd_dev->parent_overlap) {
2704 rbd_assert(img_offset < rbd_dev->parent_overlap);
2705 length = rbd_dev->parent_overlap - img_offset;
2709 Allocate a page array big enough to receive the data read
2710 from the parent.
2712 page_count = (u32)calc_pages_for(0, length);
2713 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2714 if (IS_ERR(pages)) {
2715 result = PTR_ERR(pages);
2716 pages = NULL;
2717 goto out_err;
2721 parent_request = rbd_parent_request_create(obj_request,
2722 img_offset, length);
2723 if (!parent_request)
2724 goto out_err;
2726 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2727 if (result)
2728 goto out_err;
2729 parent_request->copyup_pages = pages;
2730 parent_request->copyup_page_count = page_count;
2732 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2733 result = rbd_img_request_submit(parent_request);
2734 if (!result)
2735 return 0;
2737 parent_request->copyup_pages = NULL;
2738 parent_request->copyup_page_count = 0;
2739 parent_request->obj_request = NULL;
2740 rbd_obj_request_put(obj_request);
2741 out_err:
2742 if (pages)
2743 ceph_release_page_vector(pages, page_count);
2744 if (parent_request)
2745 rbd_img_request_put(parent_request);
2746 obj_request->result = result;
2747 obj_request->xferred = 0;
2748 obj_request_done_set(obj_request);
2750 return result;
2753 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2755 struct rbd_obj_request *orig_request;
2756 struct rbd_device *rbd_dev;
2759 rbd_assert(!obj_request_img_data_test(obj_request));
2762 * All we need from the object request is the original
2763 * request and the result of the STAT op. Grab those, then
2764 * we're done with the request.
2766 orig_request = obj_request->obj_request;
2767 obj_request->obj_request = NULL;
2768 rbd_obj_request_put(orig_request);
2769 rbd_assert(orig_request);
2770 rbd_assert(orig_request->img_request);
2772 result = obj_request->result;
2773 obj_request->result = 0;
2775 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2776 obj_request, orig_request, result,
2777 obj_request->xferred, obj_request->length);
2778 rbd_obj_request_put(obj_request);
2781 * If the overlap has become 0 (most likely because the
2782 * image has been flattened) we need to free the pages
2783 * and re-submit the original write request.
2785 rbd_dev = orig_request->img_request->rbd_dev;
2786 if (!rbd_dev->parent_overlap) {
2787 struct ceph_osd_client *osdc;
2789 osdc = &rbd_dev->rbd_client->client->osdc;
2790 result = rbd_obj_request_submit(osdc, orig_request);
2791 if (!result)
2792 return;
2796 * Our only purpose here is to determine whether the object
2797 * exists, and we don't want to treat the non-existence as
2798 * an error. If something else comes back, transfer the
2799 * error to the original request and complete it now.
2801 if (!result) {
2802 obj_request_existence_set(orig_request, true);
2803 } else if (result == -ENOENT) {
2804 obj_request_existence_set(orig_request, false);
2805 } else if (result) {
2806 orig_request->result = result;
2807 goto out;
2811 * Resubmit the original request now that we have recorded
2812 * whether the target object exists.
2814 orig_request->result = rbd_img_obj_request_submit(orig_request);
2815 out:
2816 if (orig_request->result)
2817 rbd_obj_request_complete(orig_request);
2820 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2822 struct rbd_obj_request *stat_request;
2823 struct rbd_device *rbd_dev;
2824 struct ceph_osd_client *osdc;
2825 struct page **pages = NULL;
2826 u32 page_count;
2827 size_t size;
2828 int ret;
2831 The response data for a STAT call consists of:
2832 le64 length;
2833 struct {
2834 le32 tv_sec;
2835 le32 tv_nsec;
2836 } mtime;
2838 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2839 page_count = (u32)calc_pages_for(0, size);
2840 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2841 if (IS_ERR(pages))
2842 return PTR_ERR(pages);
2844 ret = -ENOMEM;
2845 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2846 OBJ_REQUEST_PAGES);
2847 if (!stat_request)
2848 goto out;
2850 rbd_obj_request_get(obj_request);
2851 stat_request->obj_request = obj_request;
2852 stat_request->pages = pages;
2853 stat_request->page_count = page_count;
2855 rbd_assert(obj_request->img_request);
2856 rbd_dev = obj_request->img_request->rbd_dev;
2857 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2858 stat_request);
2859 if (!stat_request->osd_req)
2860 goto out;
2861 stat_request->callback = rbd_img_obj_exists_callback;
2863 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2864 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2865 false, false);
2866 rbd_osd_req_format_read(stat_request);
2868 osdc = &rbd_dev->rbd_client->client->osdc;
2869 ret = rbd_obj_request_submit(osdc, stat_request);
2870 out:
2871 if (ret)
2872 rbd_obj_request_put(obj_request);
2874 return ret;
2877 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2879 struct rbd_img_request *img_request;
2880 struct rbd_device *rbd_dev;
2882 rbd_assert(obj_request_img_data_test(obj_request));
2884 img_request = obj_request->img_request;
2885 rbd_assert(img_request);
2886 rbd_dev = img_request->rbd_dev;
2889 if (!img_request_write_test(img_request) &&
2890 !img_request_discard_test(img_request))
2891 return true;
2893 /* Non-layered writes */
2894 if (!img_request_layered_test(img_request))
2895 return true;
2898 * Layered writes outside of the parent overlap range don't
2899 * share any data with the parent.
2901 if (!obj_request_overlaps_parent(obj_request))
2902 return true;
2905 * Entire-object layered writes - we will overwrite whatever
2906 * parent data there is anyway.
2908 if (!obj_request->offset &&
2909 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2910 return true;
2913 * If the object is known to already exist, its parent data has
2914 * already been copied.
2916 if (obj_request_known_test(obj_request) &&
2917 obj_request_exists_test(obj_request))
2918 return true;
2920 return false;
2923 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2925 if (img_obj_request_simple(obj_request)) {
2926 struct rbd_device *rbd_dev;
2927 struct ceph_osd_client *osdc;
2929 rbd_dev = obj_request->img_request->rbd_dev;
2930 osdc = &rbd_dev->rbd_client->client->osdc;
2932 return rbd_obj_request_submit(osdc, obj_request);
2936 * It's a layered write. The target object might exist but
2937 * we may not know that yet. If we know it doesn't exist,
2938 * start by reading the data for the full target object from
2939 * the parent so we can use it for a copyup to the target.
2941 if (obj_request_known_test(obj_request))
2942 return rbd_img_obj_parent_read_full(obj_request);
2944 /* We don't know whether the target exists. Go find out. */
2946 return rbd_img_obj_exists_submit(obj_request);
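In summary, an object request that is part of a layered write takes up to three hops: a STAT to learn whether the target object exists (rbd_img_obj_exists_submit()), a full-object read from the parent when it does not (rbd_img_obj_parent_read_full()), and finally the copyup request that writes parent data plus new data to the target; the img_obj_request_simple() cases above bypass all of that and go straight to the OSD.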
2949 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2951 struct rbd_obj_request *obj_request;
2952 struct rbd_obj_request *next_obj_request;
2954 dout("%s: img %p\n", __func__, img_request);
2955 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2956 int ret;
2958 ret = rbd_img_obj_request_submit(obj_request);
2959 if (ret)
2960 return ret;
2963 return 0;
2966 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2968 struct rbd_obj_request *obj_request;
2969 struct rbd_device *rbd_dev;
2974 rbd_assert(img_request_child_test(img_request));
2976 /* First get what we need from the image request and release it */
2978 obj_request = img_request->obj_request;
2979 img_xferred = img_request->xferred;
2980 img_result = img_request->result;
2981 rbd_img_request_put(img_request);
2984 * If the overlap has become 0 (most likely because the
2985 image has been flattened) we need to re-submit the
2986 original request.
2988 rbd_assert(obj_request);
2989 rbd_assert(obj_request->img_request);
2990 rbd_dev = obj_request->img_request->rbd_dev;
2991 if (!rbd_dev->parent_overlap) {
2992 struct ceph_osd_client *osdc;
2994 osdc = &rbd_dev->rbd_client->client->osdc;
2995 img_result = rbd_obj_request_submit(osdc, obj_request);
3000 obj_request->result = img_result;
3001 if (obj_request->result)
3002 goto out;
3005 * We need to zero anything beyond the parent overlap
3006 * boundary. Since rbd_img_obj_request_read_callback()
3007 * will zero anything beyond the end of a short read, an
3008 * easy way to do this is to pretend the data from the
3009 * parent came up short--ending at the overlap boundary.
3011 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3012 obj_end = obj_request->img_offset + obj_request->length;
3013 if (obj_end > rbd_dev->parent_overlap) {
3014 u64 xferred = 0;
3016 if (obj_request->img_offset < rbd_dev->parent_overlap)
3017 xferred = rbd_dev->parent_overlap -
3018 obj_request->img_offset;
3020 obj_request->xferred = min(img_xferred, xferred);
3021 } else {
3022 obj_request->xferred = img_xferred;
3023 }
3024 out:
3025 rbd_img_obj_request_read_callback(obj_request);
3026 rbd_obj_request_complete(obj_request);
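Worked example for the trimming above, assuming a 6 MiB parent overlap: a child read of 4 MiB at image offset 4 MiB that falls through to the parent gives obj_end = 8 MiB > 6 MiB, so xferred is capped at 6 MiB - 4 MiB = 2 MiB and rbd_img_obj_request_read_callback() zero-fills the last 2 MiB beyond the overlap.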
3029 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3031 struct rbd_img_request *img_request;
3034 rbd_assert(obj_request_img_data_test(obj_request));
3035 rbd_assert(obj_request->img_request != NULL);
3036 rbd_assert(obj_request->result == (s32) -ENOENT);
3037 rbd_assert(obj_request_type_valid(obj_request->type));
3039 /* rbd_read_finish(obj_request, obj_request->length); */
3040 img_request = rbd_parent_request_create(obj_request,
3041 obj_request->img_offset,
3042 obj_request->length);
3043 result = -ENOMEM;
3044 if (!img_request)
3045 goto out_err;
3047 if (obj_request->type == OBJ_REQUEST_BIO)
3048 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3049 obj_request->bio_list);
3051 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3052 obj_request->pages);
3053 if (result)
3054 goto out_err;
3056 img_request->callback = rbd_img_parent_read_callback;
3057 result = rbd_img_request_submit(img_request);
3058 if (result)
3059 goto out_err;
3061 return;
3062 out_err:
3063 if (img_request)
3064 rbd_img_request_put(img_request);
3065 obj_request->result = result;
3066 obj_request->xferred = 0;
3067 obj_request_done_set(obj_request);
3070 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
3072 struct rbd_obj_request *obj_request;
3073 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3076 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3077 OBJ_REQUEST_NODATA);
3078 if (!obj_request)
3079 return -ENOMEM;
3082 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3083 obj_request);
3084 if (!obj_request->osd_req)
3085 goto out;
3087 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
3088 notify_id, 0, 0);
3089 rbd_osd_req_format_read(obj_request);
3091 ret = rbd_obj_request_submit(osdc, obj_request);
3092 if (ret)
3093 goto out;
3094 ret = rbd_obj_request_wait(obj_request);
3095 out:
3096 rbd_obj_request_put(obj_request);
3098 return ret;
3101 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3103 struct rbd_device *rbd_dev = (struct rbd_device *)data;
3109 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
3110 rbd_dev->header_name, (unsigned long long)notify_id,
3111 (unsigned int)opcode);
3114 * Until adequate refresh error handling is in place, there is
3115 * not much we can do here, except warn.
3117 * See http://tracker.ceph.com/issues/5040
3119 ret = rbd_dev_refresh(rbd_dev);
3120 if (ret)
3121 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3123 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3124 if (ret)
3125 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
3129 * Send a (un)watch request and wait for the ack. Return a request
3130 * with a ref held on success or error.
3132 static struct rbd_obj_request *rbd_obj_watch_request_helper(
3133 struct rbd_device *rbd_dev,
3136 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3137 struct rbd_obj_request *obj_request;
3140 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3141 OBJ_REQUEST_NODATA);
3142 if (!obj_request)
3143 return ERR_PTR(-ENOMEM);
3145 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
3146 obj_request);
3147 if (!obj_request->osd_req) {
3148 ret = -ENOMEM;
3149 goto out;
3150 }
3152 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3153 rbd_dev->watch_event->cookie, 0, watch);
3154 rbd_osd_req_format_write(obj_request);
3156 if (watch)
3157 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3159 ret = rbd_obj_request_submit(osdc, obj_request);
3160 if (ret)
3161 goto out;
3163 ret = rbd_obj_request_wait(obj_request);
3164 if (ret)
3165 goto out;
3167 ret = obj_request->result;
3168 if (ret) {
3169 if (watch)
3170 rbd_obj_request_end(obj_request);
3171 goto out;
3172 }
3174 return obj_request;
3176 out:
3177 rbd_obj_request_put(obj_request);
3178 return ERR_PTR(ret);
3182 * Initiate a watch request, synchronously.
3184 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
3186 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3187 struct rbd_obj_request *obj_request;
3190 rbd_assert(!rbd_dev->watch_event);
3191 rbd_assert(!rbd_dev->watch_request);
3193 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3194 &rbd_dev->watch_event);
3195 if (ret < 0)
3196 return ret;
3198 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3199 if (IS_ERR(obj_request)) {
3200 ceph_osdc_cancel_event(rbd_dev->watch_event);
3201 rbd_dev->watch_event = NULL;
3202 return PTR_ERR(obj_request);
3206 * A watch request is set to linger, so the underlying osd
3207 * request won't go away until we unregister it. We retain
3208 * a pointer to the object request during that time (in
3209 * rbd_dev->watch_request), so we'll keep a reference to it.
3210 * We'll drop that reference after we've unregistered it in
3211 * rbd_dev_header_unwatch_sync().
3213 rbd_dev->watch_request = obj_request;
3215 return 0;
3219 * Tear down a watch request, synchronously.
3221 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3223 struct rbd_obj_request *obj_request;
3225 rbd_assert(rbd_dev->watch_event);
3226 rbd_assert(rbd_dev->watch_request);
3228 rbd_obj_request_end(rbd_dev->watch_request);
3229 rbd_obj_request_put(rbd_dev->watch_request);
3230 rbd_dev->watch_request = NULL;
3232 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3233 if (!IS_ERR(obj_request))
3234 rbd_obj_request_put(obj_request);
3235 else
3236 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3237 PTR_ERR(obj_request));
3239 ceph_osdc_cancel_event(rbd_dev->watch_event);
3240 rbd_dev->watch_event = NULL;
3244 * Synchronous osd object method call. Returns the number of bytes
3245 returned in the inbound (reply) buffer, or a negative error code.
3247 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3248 const char *object_name,
3249 const char *class_name,
3250 const char *method_name,
3251 const void *outbound,
3252 size_t outbound_size,
3254 size_t inbound_size)
3256 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3257 struct rbd_obj_request *obj_request;
3258 struct page **pages;
3263 Method calls are ultimately read operations. The result
3264 should be placed into the inbound buffer provided. They
3265 also supply outbound data--parameters for the object
3266 method. Currently if this is present it will be a
3267 snapshot id.
3270 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3271 if (IS_ERR(pages))
3272 return PTR_ERR(pages);
3274 ret = -ENOMEM;
3275 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3276 OBJ_REQUEST_PAGES);
3277 if (!obj_request)
3278 goto out;
3280 obj_request->pages = pages;
3281 obj_request->page_count = page_count;
3283 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3284 obj_request);
3285 if (!obj_request->osd_req)
3286 goto out;
3288 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3289 class_name, method_name);
3290 if (outbound_size) {
3291 struct ceph_pagelist *pagelist;
3293 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3294 if (!pagelist)
3295 goto out;
3297 ceph_pagelist_init(pagelist);
3298 ceph_pagelist_append(pagelist, outbound, outbound_size);
3299 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3300 pagelist);
3302 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3303 obj_request->pages, inbound_size,
3304 0, false, false);
3305 rbd_osd_req_format_read(obj_request);
3307 ret = rbd_obj_request_submit(osdc, obj_request);
3308 if (ret)
3309 goto out;
3310 ret = rbd_obj_request_wait(obj_request);
3311 if (ret)
3312 goto out;
3314 ret = obj_request->result;
3315 if (ret)
3316 goto out;
3318 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3319 ret = (int)obj_request->xferred;
3320 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3321 out:
3322 if (obj_request)
3323 rbd_obj_request_put(obj_request);
3324 else
3325 ceph_release_page_vector(pages, page_count);
3327 return ret;
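A minimal usage sketch, modeled on _rbd_dev_v2_snap_size() later in this file (the reply layout is that caller's assumption, not part of this helper):

	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				  "rbd", "get_size",
				  &snapid, sizeof (snapid),
				  &size_buf, sizeof (size_buf));

On success, ret is the number of reply bytes copied into size_buf.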
3330 static void rbd_queue_workfn(struct work_struct *work)
3332 struct request *rq = blk_mq_rq_from_pdu(work);
3333 struct rbd_device *rbd_dev = rq->q->queuedata;
3334 struct rbd_img_request *img_request;
3335 struct ceph_snap_context *snapc = NULL;
3336 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3337 u64 length = blk_rq_bytes(rq);
3338 enum obj_operation_type op_type;
3339 u64 mapping_size;
3340 int result;
3342 if (rq->cmd_type != REQ_TYPE_FS) {
3343 dout("%s: non-fs request type %d\n", __func__,
3344 (int) rq->cmd_type);
3345 result = -EIO;
3346 goto err;
3349 if (rq->cmd_flags & REQ_DISCARD)
3350 op_type = OBJ_OP_DISCARD;
3351 else if (rq->cmd_flags & REQ_WRITE)
3352 op_type = OBJ_OP_WRITE;
3354 op_type = OBJ_OP_READ;
3356 /* Ignore/skip any zero-length requests */
3358 if (!length) {
3359 dout("%s: zero-length request\n", __func__);
3360 result = 0;
3361 goto err_rq;
3362 }
3364 /* Only reads are allowed to a read-only device */
3366 if (op_type != OBJ_OP_READ) {
3367 if (rbd_dev->mapping.read_only) {
3368 result = -EROFS;
3369 goto err_rq;
3370 }
3371 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3375 * Quit early if the mapped snapshot no longer exists. It's
3376 * still possible the snapshot will have disappeared by the
3377 * time our request arrives at the osd, but there's no sense in
3378 * sending it if we already know.
3380 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3381 dout("request for non-existent snapshot");
3382 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3383 result = -ENXIO;
3384 goto err_rq;
3387 if (offset && length > U64_MAX - offset + 1) {
3388 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3389 length);
3390 result = -EINVAL;
3391 goto err_rq; /* Shouldn't happen */
3394 blk_mq_start_request(rq);
3396 down_read(&rbd_dev->header_rwsem);
3397 mapping_size = rbd_dev->mapping.size;
3398 if (op_type != OBJ_OP_READ) {
3399 snapc = rbd_dev->header.snapc;
3400 ceph_get_snap_context(snapc);
3402 up_read(&rbd_dev->header_rwsem);
3404 if (offset + length > mapping_size) {
3405 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3406 length, mapping_size);
3407 result = -EIO;
3408 goto err_rq;
3411 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
3412 snapc);
3413 if (!img_request) {
3414 result = -ENOMEM;
3415 goto err_rq;
3416 }
3417 img_request->rq = rq;
3418 snapc = NULL; /* img_request consumes a ref */
3419 if (op_type == OBJ_OP_DISCARD)
3420 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3421 NULL);
3422 else
3423 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3424 rq->bio);
3425 if (result)
3426 goto err_img_request;
3428 result = rbd_img_request_submit(img_request);
3429 if (result)
3430 goto err_img_request;
3432 return;
3434 err_img_request:
3435 rbd_img_request_put(img_request);
3436 err_rq:
3437 if (result)
3438 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3439 obj_op_name(op_type), length, offset, result);
3440 ceph_put_snap_context(snapc);
3441 err:
3442 blk_mq_end_request(rq, result);
3445 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3446 const struct blk_mq_queue_data *bd)
3448 struct request *rq = bd->rq;
3449 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3451 queue_work(rbd_wq, work);
3452 return BLK_MQ_RQ_QUEUE_OK;
3456 * a queue callback. Makes sure that we don't create a bio that spans across
3457 multiple osd objects. One exception would be with single-page bios,
3458 * which we handle later at bio_chain_clone_range()
3460 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3461 struct bio_vec *bvec)
3463 struct rbd_device *rbd_dev = q->queuedata;
3464 sector_t sector_offset;
3465 sector_t sectors_per_obj;
3466 sector_t obj_sector_offset;
3470 * Find how far into its rbd object the partition-relative
3471 bio start sector is, as an offset relative to the enclosing
3472 device.
3474 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3475 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3476 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3479 * Compute the number of bytes from that offset to the end
3480 * of the object. Account for what's already used by the bio.
3482 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3483 if (ret > bmd->bi_size)
3484 ret -= bmd->bi_size;
3485 else
3486 ret = 0;
3489 * Don't send back more than was asked for. And if the bio
3490 * was empty, let the whole thing through because: "Note
3491 * that a block device *must* allow a single page to be
3492 * added to an empty bio."
3494 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3495 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3496 ret = (int) bvec->bv_len;
3498 return ret;
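Worked example, assuming the default object order of 22: sectors_per_obj = 1 << (22 - 9) = 8192. A bio starting at absolute sector 12000 sits 12000 & 8191 = 3808 sectors into its object, so at most (8192 - 3808) << 9 = 2244608 bytes (minus whatever the bio already holds) may be added before it would cross an object boundary.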
3501 static void rbd_free_disk(struct rbd_device *rbd_dev)
3503 struct gendisk *disk = rbd_dev->disk;
3505 if (!disk)
3506 return;
3508 rbd_dev->disk = NULL;
3509 if (disk->flags & GENHD_FL_UP) {
3510 del_gendisk(disk);
3511 if (disk->queue)
3512 blk_cleanup_queue(disk->queue);
3513 blk_mq_free_tag_set(&rbd_dev->tag_set);
3514 }
3516 put_disk(disk);
3518 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3519 const char *object_name,
3520 u64 offset, u64 length, void *buf)
3523 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3524 struct rbd_obj_request *obj_request;
3525 struct page **pages = NULL;
3530 page_count = (u32) calc_pages_for(offset, length);
3531 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3532 if (IS_ERR(pages))
3533 return PTR_ERR(pages);
3535 ret = -ENOMEM;
3536 obj_request = rbd_obj_request_create(object_name, offset, length,
3537 OBJ_REQUEST_PAGES);
3538 if (!obj_request)
3539 goto out;
3541 obj_request->pages = pages;
3542 obj_request->page_count = page_count;
3544 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3545 obj_request);
3546 if (!obj_request->osd_req)
3547 goto out;
3549 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3550 offset, length, 0, 0);
3551 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3552 obj_request->pages,
3553 obj_request->length,
3554 obj_request->offset & ~PAGE_MASK,
3555 false, false);
3556 rbd_osd_req_format_read(obj_request);
3558 ret = rbd_obj_request_submit(osdc, obj_request);
3559 if (ret)
3560 goto out;
3561 ret = rbd_obj_request_wait(obj_request);
3562 if (ret)
3563 goto out;
3565 ret = obj_request->result;
3566 if (ret)
3567 goto out;
3569 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3570 size = (size_t) obj_request->xferred;
3571 ceph_copy_from_page_vector(pages, buf, 0, size);
3572 rbd_assert(size <= (size_t)INT_MAX);
3573 ret = (int)size;
3574 out:
3575 if (obj_request)
3576 rbd_obj_request_put(obj_request);
3577 else
3578 ceph_release_page_vector(pages, page_count);
3580 return ret;
3584 * Read the complete header for the given rbd device. On successful
3585 * return, the rbd_dev->header field will contain up-to-date
3586 * information about the image.
3588 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3590 struct rbd_image_header_ondisk *ondisk = NULL;
3597 * The complete header will include an array of its 64-bit
3598 * snapshot ids, followed by the names of those snapshots as
3599 * a contiguous block of NUL-terminated strings. Note that
3600 * the number of snapshots could change by the time we read
3601 * it in, in which case we re-read it.
3608 size = sizeof (*ondisk);
3609 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3611 ondisk = kmalloc(size, GFP_KERNEL);
3612 if (!ondisk)
3613 return -ENOMEM;
3615 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3616 0, size, ondisk);
3617 if (ret < 0)
3618 goto out;
3619 if ((size_t)ret < size) {
3620 ret = -ENXIO;
3621 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3622 size, ret);
3623 goto out;
3625 if (!rbd_dev_ondisk_valid(ondisk)) {
3626 ret = -ENXIO;
3627 rbd_warn(rbd_dev, "invalid header");
3628 goto out;
3631 names_size = le64_to_cpu(ondisk->snap_names_len);
3632 want_count = snap_count;
3633 snap_count = le32_to_cpu(ondisk->snap_count);
3634 } while (snap_count != want_count);
3636 ret = rbd_header_from_disk(rbd_dev, ondisk);
3637 out:
3638 kfree(ondisk);
3640 return ret;
3644 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3645 * has disappeared from the (just updated) snapshot context.
3647 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3651 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3652 return;
3654 snap_id = rbd_dev->spec->snap_id;
3655 if (snap_id == CEPH_NOSNAP)
3656 return;
3658 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3659 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3662 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3668 * Don't hold the lock while doing disk operations,
3669 * or lock ordering will conflict with the bdev mutex via:
3670 * rbd_add() -> blkdev_get() -> rbd_open()
3672 spin_lock_irq(&rbd_dev->lock);
3673 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3674 spin_unlock_irq(&rbd_dev->lock);
3676 * If the device is being removed, rbd_dev->disk has
3677 * been destroyed, so don't try to update its size
3679 if (!removing) {
3680 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3681 dout("setting size to %llu sectors", (unsigned long long)size);
3682 set_capacity(rbd_dev->disk, size);
3683 revalidate_disk(rbd_dev->disk);
3687 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3692 down_write(&rbd_dev->header_rwsem);
3693 mapping_size = rbd_dev->mapping.size;
3695 ret = rbd_dev_header_info(rbd_dev);
3696 if (ret)
3697 goto out;
3700 * If there is a parent, see if it has disappeared due to the
3701 * mapped image getting flattened.
3703 if (rbd_dev->parent) {
3704 ret = rbd_dev_v2_parent_info(rbd_dev);
3705 if (ret)
3706 goto out;
3709 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3710 rbd_dev->mapping.size = rbd_dev->header.image_size;
3711 } else {
3712 /* validate mapped snapshot's EXISTS flag */
3713 rbd_exists_validate(rbd_dev);
3714 }
3716 out:
3717 up_write(&rbd_dev->header_rwsem);
3718 if (!ret && mapping_size != rbd_dev->mapping.size)
3719 rbd_dev_update_size(rbd_dev);
3724 static int rbd_init_request(void *data, struct request *rq,
3725 unsigned int hctx_idx, unsigned int request_idx,
3726 unsigned int numa_node)
3728 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3730 INIT_WORK(work, rbd_queue_workfn);
3732 return 0;
3734 static struct blk_mq_ops rbd_mq_ops = {
3735 .queue_rq = rbd_queue_rq,
3736 .map_queue = blk_mq_map_queue,
3737 .init_request = rbd_init_request,
3740 static int rbd_init_disk(struct rbd_device *rbd_dev)
3742 struct gendisk *disk;
3743 struct request_queue *q;
3744 u64 segment_size;
3745 int err;
3747 /* create gendisk info */
3748 disk = alloc_disk(single_major ?
3749 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3750 RBD_MINORS_PER_MAJOR);
3751 if (!disk)
3752 return -ENOMEM;
3754 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3755 rbd_dev->dev_id);
3756 disk->major = rbd_dev->major;
3757 disk->first_minor = rbd_dev->minor;
3759 disk->flags |= GENHD_FL_EXT_DEVT;
3760 disk->fops = &rbd_bd_ops;
3761 disk->private_data = rbd_dev;
3763 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3764 rbd_dev->tag_set.ops = &rbd_mq_ops;
3765 rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ;
3766 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
3767 rbd_dev->tag_set.flags =
3768 BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
3769 rbd_dev->tag_set.nr_hw_queues = 1;
3770 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3772 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3773 if (err)
3774 goto out_disk;
3776 q = blk_mq_init_queue(&rbd_dev->tag_set);
3777 if (IS_ERR(q)) {
3778 err = PTR_ERR(q);
3779 goto out_tag_set;
3780 }
3782 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3783 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
3785 /* set io sizes to object size */
3786 segment_size = rbd_obj_bytes(&rbd_dev->header);
3787 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3788 blk_queue_max_segment_size(q, segment_size);
3789 blk_queue_io_min(q, segment_size);
3790 blk_queue_io_opt(q, segment_size);
3792 /* enable the discard support */
3793 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3794 q->limits.discard_granularity = segment_size;
3795 q->limits.discard_alignment = segment_size;
3796 q->limits.max_discard_sectors = segment_size / SECTOR_SIZE;
3797 q->limits.discard_zeroes_data = 1;
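With the default 4 MiB objects these limits cap a single I/O or discard at 8192 sectors and advertise one full object as both the minimum and optimal I/O size.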
3799 blk_queue_merge_bvec(q, rbd_merge_bvec);
3800 disk->queue = q;
3802 q->queuedata = rbd_dev;
3804 rbd_dev->disk = disk;
3806 return 0;
3807 out_tag_set:
3808 blk_mq_free_tag_set(&rbd_dev->tag_set);
3809 out_disk:
3810 put_disk(disk);
3811 return err;
3818 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3820 return container_of(dev, struct rbd_device, dev);
3823 static ssize_t rbd_size_show(struct device *dev,
3824 struct device_attribute *attr, char *buf)
3826 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3828 return sprintf(buf, "%llu\n",
3829 (unsigned long long)rbd_dev->mapping.size);
3833 * Note this shows the features for whatever's mapped, which is not
3834 * necessarily the base image.
3836 static ssize_t rbd_features_show(struct device *dev,
3837 struct device_attribute *attr, char *buf)
3839 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3841 return sprintf(buf, "0x%016llx\n",
3842 (unsigned long long)rbd_dev->mapping.features);
3845 static ssize_t rbd_major_show(struct device *dev,
3846 struct device_attribute *attr, char *buf)
3848 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3850 if (rbd_dev->major)
3851 return sprintf(buf, "%d\n", rbd_dev->major);
3853 return sprintf(buf, "(none)\n");
3856 static ssize_t rbd_minor_show(struct device *dev,
3857 struct device_attribute *attr, char *buf)
3859 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3861 return sprintf(buf, "%d\n", rbd_dev->minor);
3864 static ssize_t rbd_client_id_show(struct device *dev,
3865 struct device_attribute *attr, char *buf)
3867 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3869 return sprintf(buf, "client%lld\n",
3870 ceph_client_id(rbd_dev->rbd_client->client));
3873 static ssize_t rbd_pool_show(struct device *dev,
3874 struct device_attribute *attr, char *buf)
3876 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3878 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3881 static ssize_t rbd_pool_id_show(struct device *dev,
3882 struct device_attribute *attr, char *buf)
3884 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3886 return sprintf(buf, "%llu\n",
3887 (unsigned long long) rbd_dev->spec->pool_id);
3890 static ssize_t rbd_name_show(struct device *dev,
3891 struct device_attribute *attr, char *buf)
3893 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3895 if (rbd_dev->spec->image_name)
3896 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3898 return sprintf(buf, "(unknown)\n");
3901 static ssize_t rbd_image_id_show(struct device *dev,
3902 struct device_attribute *attr, char *buf)
3904 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3906 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3910 * Shows the name of the currently-mapped snapshot (or
3911 * RBD_SNAP_HEAD_NAME for the base image).
3913 static ssize_t rbd_snap_show(struct device *dev,
3914 struct device_attribute *attr,
3915 char *buf)
3917 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3919 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3923 * For a v2 image, shows the chain of parent images, separated by empty
3924 lines. For v1 images or if there is no parent, shows "(no parent
3925 image)".
3927 static ssize_t rbd_parent_show(struct device *dev,
3928 struct device_attribute *attr,
3929 char *buf)
3931 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3932 ssize_t count = 0;
3934 if (!rbd_dev->parent)
3935 return sprintf(buf, "(no parent image)\n");
3937 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3938 struct rbd_spec *spec = rbd_dev->parent_spec;
3940 count += sprintf(&buf[count], "%s"
3941 "pool_id %llu\npool_name %s\n"
3942 "image_id %s\nimage_name %s\n"
3943 "snap_id %llu\nsnap_name %s\n"
3945 !count ? "" : "\n", /* first? */
3946 spec->pool_id, spec->pool_name,
3947 spec->image_id, spec->image_name ?: "(unknown)",
3948 spec->snap_id, spec->snap_name,
3949 rbd_dev->parent_overlap);
3952 return count;
3955 static ssize_t rbd_image_refresh(struct device *dev,
3956 struct device_attribute *attr,
3957 const char *buf, size_t size)
3960 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3961 int ret;
3963 ret = rbd_dev_refresh(rbd_dev);
3964 if (ret)
3965 return ret;
3967 return size;
3970 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3971 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3972 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3973 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3974 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3975 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3976 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3977 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3978 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3979 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3980 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3981 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3983 static struct attribute *rbd_attrs[] = {
3984 &dev_attr_size.attr,
3985 &dev_attr_features.attr,
3986 &dev_attr_major.attr,
3987 &dev_attr_minor.attr,
3988 &dev_attr_client_id.attr,
3989 &dev_attr_pool.attr,
3990 &dev_attr_pool_id.attr,
3991 &dev_attr_name.attr,
3992 &dev_attr_image_id.attr,
3993 &dev_attr_current_snap.attr,
3994 &dev_attr_parent.attr,
3995 &dev_attr_refresh.attr,
3999 static struct attribute_group rbd_attr_group = {
4000 .attrs = rbd_attrs,
4003 static const struct attribute_group *rbd_attr_groups[] = {
4004 &rbd_attr_group,
4005 NULL,
4008 static void rbd_sysfs_dev_release(struct device *dev)
4012 static struct device_type rbd_device_type = {
4014 .groups = rbd_attr_groups,
4015 .release = rbd_sysfs_dev_release,
4018 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4020 kref_get(&spec->kref);
4025 static void rbd_spec_free(struct kref *kref);
4026 static void rbd_spec_put(struct rbd_spec *spec)
4028 if (spec)
4029 kref_put(&spec->kref, rbd_spec_free);
4032 static struct rbd_spec *rbd_spec_alloc(void)
4034 struct rbd_spec *spec;
4036 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4037 if (!spec)
4038 return NULL;
4040 spec->pool_id = CEPH_NOPOOL;
4041 spec->snap_id = CEPH_NOSNAP;
4042 kref_init(&spec->kref);
4044 return spec;
4047 static void rbd_spec_free(struct kref *kref)
4049 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4051 kfree(spec->pool_name);
4052 kfree(spec->image_id);
4053 kfree(spec->image_name);
4054 kfree(spec->snap_name);
4055 kfree(spec);
4058 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4059 struct rbd_spec *spec)
4061 struct rbd_device *rbd_dev;
4063 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4064 if (!rbd_dev)
4065 return NULL;
4067 spin_lock_init(&rbd_dev->lock);
4069 atomic_set(&rbd_dev->parent_ref, 0);
4070 INIT_LIST_HEAD(&rbd_dev->node);
4071 init_rwsem(&rbd_dev->header_rwsem);
4073 rbd_dev->spec = spec;
4074 rbd_dev->rbd_client = rbdc;
4076 /* Initialize the layout used for all rbd requests */
4078 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4079 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4080 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4081 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4083 return rbd_dev;
4086 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4088 rbd_put_client(rbd_dev->rbd_client);
4089 rbd_spec_put(rbd_dev->spec);
4090 kfree(rbd_dev);
4094 * Get the size and object order for an image snapshot, or if
4095 snap_id is CEPH_NOSNAP, gets this information for the base
4096 image.
4098 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4099 u8 *order, u64 *snap_size)
4101 __le64 snapid = cpu_to_le64(snap_id);
4103 struct {
4104 u8 order;
4105 __le64 size;
4106 } __attribute__ ((packed)) size_buf = { 0 };
4107 int ret;
4108 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4109 "rbd", "get_size",
4110 &snapid, sizeof (snapid),
4111 &size_buf, sizeof (size_buf));
4112 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4113 if (ret < 0)
4114 return ret;
4115 if (ret < sizeof (size_buf))
4116 return -ERANGE;
4118 if (order) {
4119 *order = size_buf.order;
4120 dout(" order %u", (unsigned int)*order);
4121 }
4122 *snap_size = le64_to_cpu(size_buf.size);
4124 dout(" snap_id 0x%016llx snap_size = %llu\n",
4125 (unsigned long long)snap_id,
4126 (unsigned long long)*snap_size);
4128 return 0;
4131 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4133 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4134 &rbd_dev->header.obj_order,
4135 &rbd_dev->header.image_size);
4138 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4144 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4145 if (!reply_buf)
4146 return -ENOMEM;
4148 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4149 "rbd", "get_object_prefix", NULL, 0,
4150 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4151 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4152 if (ret < 0)
4153 goto out;
4155 p = reply_buf;
4156 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4157 p + ret, NULL, GFP_NOIO);
4160 if (IS_ERR(rbd_dev->header.object_prefix)) {
4161 ret = PTR_ERR(rbd_dev->header.object_prefix);
4162 rbd_dev->header.object_prefix = NULL;
4163 } else {
4164 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4165 }
4166 out:
4167 kfree(reply_buf);
4169 return ret;
4172 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4175 __le64 snapid = cpu_to_le64(snap_id);
4176 struct {
4177 __le64 features;
4178 __le64 incompat;
4179 } __attribute__ ((packed)) features_buf = { 0 };
4180 u64 incompat;
4181 int ret;
4183 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4184 "rbd", "get_features",
4185 &snapid, sizeof (snapid),
4186 &features_buf, sizeof (features_buf));
4187 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4188 if (ret < 0)
4189 return ret;
4190 if (ret < sizeof (features_buf))
4191 return -ERANGE;
4193 incompat = le64_to_cpu(features_buf.incompat);
4194 if (incompat & ~RBD_FEATURES_SUPPORTED)
4195 return -ENXIO;
4197 *snap_features = le64_to_cpu(features_buf.features);
4199 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4200 (unsigned long long)snap_id,
4201 (unsigned long long)*snap_features,
4202 (unsigned long long)le64_to_cpu(features_buf.incompat));
4204 return 0;
4207 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4209 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4210 &rbd_dev->header.features);
4213 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4215 struct rbd_spec *parent_spec;
4217 void *reply_buf = NULL;
4227 parent_spec = rbd_spec_alloc();
4228 if (!parent_spec)
4229 return -ENOMEM;
4231 size = sizeof (__le64) + /* pool_id */
4232 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4233 sizeof (__le64) + /* snap_id */
4234 sizeof (__le64); /* overlap */
4235 reply_buf = kmalloc(size, GFP_KERNEL);
4236 if (!reply_buf) {
4237 ret = -ENOMEM;
4238 goto out_err;
4239 }
4241 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
4242 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4243 "rbd", "get_parent",
4244 &snapid, sizeof (snapid),
4245 reply_buf, size);
4246 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4247 if (ret < 0)
4248 goto out_err;
4250 p = reply_buf;
4251 end = reply_buf + ret;
4252 ret = -ERANGE;
4253 ceph_decode_64_safe(&p, end, pool_id, out_err);
4254 if (pool_id == CEPH_NOPOOL) {
4256 * Either the parent never existed, or we have
4257 * record of it but the image got flattened so it no
4258 * longer has a parent. When the parent of a
4259 * layered image disappears we immediately set the
4260 * overlap to 0. The effect of this is that all new
4261 requests will be treated as if the image had no
4262 parent.
4264 if (rbd_dev->parent_overlap) {
4265 rbd_dev->parent_overlap = 0;
4266 rbd_dev_parent_put(rbd_dev);
4267 pr_info("%s: clone image has been flattened\n",
4268 rbd_dev->disk->disk_name);
4271 goto out; /* No parent? No problem. */
4274 /* The ceph file layout needs to fit pool id in 32 bits */
4276 ret = -EIO;
4277 if (pool_id > (u64)U32_MAX) {
4278 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4279 (unsigned long long)pool_id, U32_MAX);
4280 goto out_err;
4281 }
4283 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4284 if (IS_ERR(image_id)) {
4285 ret = PTR_ERR(image_id);
4286 goto out_err;
4288 ceph_decode_64_safe(&p, end, snap_id, out_err);
4289 ceph_decode_64_safe(&p, end, overlap, out_err);
4292 * The parent won't change (except when the clone is
4293 flattened, which is already handled above). So we only need to
4294 record the parent spec if we have not already done so.
4296 if (!rbd_dev->parent_spec) {
4297 parent_spec->pool_id = pool_id;
4298 parent_spec->image_id = image_id;
4299 parent_spec->snap_id = snap_id;
4300 rbd_dev->parent_spec = parent_spec;
4301 parent_spec = NULL; /* rbd_dev now owns this */
4302 } else {
4303 kfree(image_id);
4304 }
4307 * We always update the parent overlap. If it's zero we issue
4308 * a warning, as we will proceed as if there was no parent.
4312 /* refresh, careful to warn just once */
4313 if (rbd_dev->parent_overlap)
4315 "clone now standalone (overlap became 0)");
4318 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4321 rbd_dev->parent_overlap = overlap;
4323 out:
4324 ret = 0;
4325 out_err:
4326 kfree(reply_buf);
4327 rbd_spec_put(parent_spec);
4329 return ret;
4332 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4334 struct {
4335 __le64 stripe_unit;
4336 __le64 stripe_count;
4337 } __attribute__ ((packed)) striping_info_buf = { 0 };
4338 size_t size = sizeof (striping_info_buf);
4339 void *p;
4340 u64 obj_size;
4341 u64 stripe_unit;
4342 u64 stripe_count;
4343 int ret;
4345 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4346 "rbd", "get_stripe_unit_count", NULL, 0,
4347 (char *)&striping_info_buf, size);
4348 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4349 if (ret < 0)
4350 return ret;
4351 if (ret < size)
4352 return -ERANGE;
4355 * We don't actually support the "fancy striping" feature
4356 * (STRIPINGV2) yet, but if the striping sizes are the
4357 * defaults the behavior is the same as before. So find
4358 * out, and only fail if the image has non-default values.
4361 obj_size = (u64)1 << rbd_dev->header.obj_order;
4362 p = &striping_info_buf;
4363 stripe_unit = ceph_decode_64(&p);
4364 if (stripe_unit != obj_size) {
4365 rbd_warn(rbd_dev, "unsupported stripe unit "
4366 "(got %llu want %llu)",
4367 stripe_unit, obj_size);
4368 return -EINVAL;
4369 }
4370 stripe_count = ceph_decode_64(&p);
4371 if (stripe_count != 1) {
4372 rbd_warn(rbd_dev, "unsupported stripe count "
4373 "(got %llu want 1)", stripe_count);
4376 rbd_dev->header.stripe_unit = stripe_unit;
4377 rbd_dev->header.stripe_count = stripe_count;
4382 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4384 size_t image_id_size;
4389 void *reply_buf = NULL;
4391 char *image_name = NULL;
4394 rbd_assert(!rbd_dev->spec->image_name);
4396 len = strlen(rbd_dev->spec->image_id);
4397 image_id_size = sizeof (__le32) + len;
4398 image_id = kmalloc(image_id_size, GFP_KERNEL);
4399 if (!image_id)
4400 return NULL;
4402 p = image_id;
4403 end = image_id + image_id_size;
4404 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4406 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4407 reply_buf = kmalloc(size, GFP_KERNEL);
4408 if (!reply_buf)
4409 goto out;
4411 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4412 "rbd", "dir_get_name",
4413 image_id, image_id_size,
4414 reply_buf, size);
4415 if (ret < 0)
4416 goto out;
4417 p = reply_buf;
4418 end = reply_buf + ret;
4420 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4421 if (IS_ERR(image_name))
4422 image_name = NULL;
4423 else
4424 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4426 out:
4427 kfree(reply_buf);
4428 kfree(image_id);
4430 return image_name;
4432 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4434 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4435 const char *snap_name;
4438 /* Skip over names until we find the one we are looking for */
4440 snap_name = rbd_dev->header.snap_names;
4441 while (which < snapc->num_snaps) {
4442 if (!strcmp(name, snap_name))
4443 return snapc->snaps[which];
4444 snap_name += strlen(snap_name) + 1;
4445 which++;
4448 return CEPH_NOSNAP;
4450 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4452 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4453 u64 snap_id;
4454 bool found = false;
4455 u32 which;
4457 for (which = 0; !found && which < snapc->num_snaps; which++) {
4458 const char *snap_name;
4460 snap_id = snapc->snaps[which];
4461 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4462 if (IS_ERR(snap_name)) {
4463 /* ignore no-longer existing snapshots */
4464 if (PTR_ERR(snap_name) == -ENOENT)
4465 continue;
4466 else
4467 break;
4468 }
4469 found = !strcmp(name, snap_name);
4470 kfree(snap_name);
4472 return found ? snap_id : CEPH_NOSNAP;
4476 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4477 * no snapshot by that name is found, or if an error occurs.
4479 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4481 if (rbd_dev->image_format == 1)
4482 return rbd_v1_snap_id_by_name(rbd_dev, name);
4484 return rbd_v2_snap_id_by_name(rbd_dev, name);
4488 * An image being mapped will have everything but the snap id.
4490 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4492 struct rbd_spec *spec = rbd_dev->spec;
4494 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4495 rbd_assert(spec->image_id && spec->image_name);
4496 rbd_assert(spec->snap_name);
4498 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4499 u64 snap_id;
4501 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4502 if (snap_id == CEPH_NOSNAP)
4503 return -ENOENT;
4505 spec->snap_id = snap_id;
4506 } else {
4507 spec->snap_id = CEPH_NOSNAP;
4508 }
4510 return 0;
4514 * A parent image will have all ids but none of the names.
4516 * All names in an rbd spec are dynamically allocated. It's OK if we
4517 * can't figure out the name for an image id.
4519 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
4521 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4522 struct rbd_spec *spec = rbd_dev->spec;
4523 const char *pool_name;
4524 const char *image_name;
4525 const char *snap_name;
4526 int ret;
4528 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4529 rbd_assert(spec->image_id);
4530 rbd_assert(spec->snap_id != CEPH_NOSNAP);
4532 /* Get the pool name; we have to make our own copy of this */
4534 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4535 if (!pool_name) {
4536 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4537 return -EIO;
4538 }
4539 pool_name = kstrdup(pool_name, GFP_KERNEL);
4540 if (!pool_name)
4541 return -ENOMEM;
4543 /* Fetch the image name; tolerate failure here */
4545 image_name = rbd_dev_image_name(rbd_dev);
4546 if (!image_name)
4547 rbd_warn(rbd_dev, "unable to get image name");
4549 /* Fetch the snapshot name */
4551 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4552 if (IS_ERR(snap_name)) {
4553 ret = PTR_ERR(snap_name);
4554 goto out_err;
4557 spec->pool_name = pool_name;
4558 spec->image_name = image_name;
4559 spec->snap_name = snap_name;
4561 return 0;
4563 out_err:
4564 kfree(image_name);
4565 kfree(pool_name);
4566 return ret;
4569 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4578 struct ceph_snap_context *snapc;
4582 * We'll need room for the seq value (maximum snapshot id),
4583 * snapshot count, and array of that many snapshot ids.
4584 * For now we have a fixed upper limit on the number we're
4585 * prepared to receive.
4587 size = sizeof (__le64) + sizeof (__le32) +
4588 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4589 reply_buf = kzalloc(size, GFP_KERNEL);
4590 if (!reply_buf)
4591 return -ENOMEM;
4593 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4594 "rbd", "get_snapcontext", NULL, 0,
4596 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4597 if (ret < 0)
4598 goto out;
4600 p = reply_buf;
4601 end = reply_buf + ret;
4602 ret = -ERANGE;
4603 ceph_decode_64_safe(&p, end, seq, out);
4604 ceph_decode_32_safe(&p, end, snap_count, out);
4607 * Make sure the reported number of snapshot ids wouldn't go
4608 * beyond the end of our buffer. But before checking that,
4609 * make sure the computed size of the snapshot context we
4610 * allocate is representable in a size_t.
4612 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4613 / sizeof (u64))
4614 goto out;
4617 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4618 goto out;
4621 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4627 for (i = 0; i < snap_count; i++)
4628 snapc->snaps[i] = ceph_decode_64(&p);
4630 ceph_put_snap_context(rbd_dev->header.snapc);
4631 rbd_dev->header.snapc = snapc;
4633 dout(" snap context seq = %llu, snap_count = %u\n",
4634 (unsigned long long)seq, (unsigned int)snap_count);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf, *p, *end;
	__le64 snapid;
	char *snap_name;
	int ret;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);
	return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);

	return ret;
}
static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_device_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}
/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.
 */
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	int new_dev_id;

	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
				    0, minor_to_rbd_dev_id(1 << MINORBITS),
				    GFP_KERNEL);
	if (new_dev_id < 0)
		return new_dev_id;

	rbd_dev->dev_id = new_dev_id;

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
	return 0;
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}
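/*
 * Note on the id range above (assumes the standard kernel value
 * MINORBITS == 20): minor_to_rbd_dev_id() is the inverse of
 * rbd_dev_id_to_minor(), so with RBD_SINGLE_MAJOR_PART_SHIFT == 4 the
 * upper bound works out to (1 << 20) >> 4 == 65536, i.e. dev ids
 * 0..65535, leaving each device room for 16 minors' worth of
 * partitions in single-major mode.
 */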
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
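/*
 * For illustration only (made-up input): given
 *
 *	const char *buf = "  1.2.3.4:6789 rbd foo";
 *	char *tok = dup_token(&buf, NULL);
 *
 * tok would be "1.2.3.4:6789" and buf would be left pointing at
 * " rbd foo"; two further dup_token() calls would yield "rbd" and
 * then "foo".  Each returned copy must be freed with kfree().
 */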
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *	The address of a pointer that will refer to a ceph options
 *	structure.  Caller must release the returned pointer using
 *	ceph_destroy_options() when it is no longer needed.
 *  opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  rbd_spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *	A comma-separated list of one or more monitor addresses.
 *	A monitor address is an ip address, optionally followed
 *	by a port number (separated by a colon).
 *	  I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *	A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *	The name of the rados pool containing the rbd image.
 *  <image_name>
 *	The name of the image in that pool to map.
 *  <snap_name>
 *	An optional snapshot name.  If provided, the mapping will
 *	present data from the image at the time that snapshot was
 *	created.  The image head is used if no snapshot name is
 *	provided.  Snapshot mappings are always read-only.
 */
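/*
 * For example (illustrative values), writing
 *
 *	1.2.3.4:6789 name=admin,secret=<key> mypool myimage mysnap
 *
 * to /sys/bus/rbd/add maps snapshot "mysnap" of image "myimage" in
 * pool "mypool", via the monitor at 1.2.3.4:6789.
 */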
static int rbd_add_parse_args(const char *buf,
			      struct ceph_options **ceph_opts,
			      struct rbd_options **opts,
			      struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;
	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
				   mon_addrs + mon_addrs_size - 1,
				   parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch, timeout);
			goto again;
		}
		/* the osdmap we have is new enough */
		return -ENOENT;
	}

	return ret;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
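/*
 * For example (illustrative name): for an image called "myimage" the
 * id object is named "rbd_id.myimage" (RBD_ID_PREFIX followed by the
 * image name), and its "get_id" class method returns the image id as
 * an encoded string.
 */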
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				  "rbd", "get_id", NULL, 0,
				  response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						       NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);
	return ret;
}
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}
	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;
		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
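/*
 * For example (illustrative names): a format 1 image "foo" gets header
 * object "foo.rbd" (RBD_SUFFIX appended), while a format 2 image with
 * id "1234" gets header object "rbd_header.1234" (RBD_HEADER_PREFIX
 * prepended to the image id).
 */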
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto out_header_name;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (mapping)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (mapping && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)",
			 (unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
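/*
 * For example (illustrative id), writing
 *
 *	echo 1 > /sys/bus/rbd/remove
 *
 * unmaps the device with dev id 1 (i.e. /dev/rbd1), provided it is
 * not open and not already being removed.
 */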
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
						   &rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shutdown
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");