/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"
static bool iovec_gap_to_prv(struct request_queue *q,
			     struct iovec *prv, struct iovec *cur)
{
	unsigned long prev_end;

	if (!queue_virt_boundary(q))
		return false;

	if (prv->iov_base == NULL && prv->iov_len == 0)
		/* prv is not set - don't check */
		return false;

	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);

	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
		prev_end & queue_virt_boundary(q));
}
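/*
 * Worked example (illustrative values, not from the original source):
 * with a 4K virt boundary, queue_virt_boundary(q) returns the mask
 * 0xfff. Given prv = { .iov_base = (void *)0x10000, .iov_len = 0x800 },
 * prev_end is 0x10800, and 0x10800 & 0xfff is non-zero: any following
 * segment leaves a gap the device cannot cross within one request, so
 * the caller falls back to a copying (bounced) mapping.
 */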
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio *bio;
	int unaligned = 0;
	struct iov_iter i;
	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};

	if (!iter || !iter->count)
		return -EINVAL;
	if (!iter_is_iovec(iter))
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;
		/*
		 * Keep going so we check length of all segments
		 */
		if ((uaddr & queue_dma_alignment(q)) ||
		    iovec_gap_to_prv(q, &prv, &iov))
			unaligned = 1;

		prv.iov_base = iov.iov_base;
		prv.iov_len = iov.iov_len;
	}

	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	if (bio->bi_iter.bi_size != iter->count) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	/* get a ref: the bounce buffer is linked in and traversed later */
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
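/*
 * Illustrative sketch, not part of the original file: importing a user
 * iovec array and handing it to blk_rq_map_user_iov(), roughly as a
 * bsg-style caller would. my_map_uvec() and its parameters are
 * hypothetical; error handling is trimmed to the essentials.
 */
#if 0
static int my_map_uvec(struct request_queue *q, struct request *rq,
		       const struct iovec __user *uvecs, unsigned long nr_segs)
{
	struct iovec stack_iov[UIO_FASTIOV], *iov = stack_iov;
	struct iov_iter iter;
	int ret;

	ret = import_iovec(rq_data_dir(rq), uvecs, nr_segs,
			   UIO_FASTIOV, &iov, &iter);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	/* import_iovec() sets iov to NULL if the stack array was used */
	kfree(iov);
	return ret;
}
#endif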
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
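/*
 * Illustrative sketch, not part of the original file: the classic
 * map/execute/unmap sequence for a passthrough read, modelled on the
 * SG_IO path. my_passthrough_read() is hypothetical. Note that the bio
 * is saved before execution, since completion may change rq->bio.
 */
#if 0
static int my_passthrough_read(struct request_queue *q, struct gendisk *disk,
			       void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out;
	bio = rq->bio;

	blk_execute_rq(q, disk, rq, 0);

	ret = blk_rq_unmap_user(bio);	/* must run in process context */
out:
	blk_put_request(rq);
	return ret;
}
#endif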
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
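/*
 * Illustrative sketch, not part of the original file: sending a command
 * with a kernel buffer attached, roughly as scsi_execute() does. Names
 * such as my_send_cmd() are hypothetical; the cdb handling is
 * SCSI-specific and trimmed to the essentials.
 */
#if 0
static int my_send_cmd(struct request_queue *q, struct gendisk *disk,
		       const unsigned char *cdb, unsigned int cdb_len,
		       void *buffer, unsigned int bufflen, int write)
{
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_NOIO);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	if (bufflen) {
		/* may fall back to a bounce copy if buffer is unaligned */
		ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_NOIO);
		if (ret)
			goto out;
	}

	rq->cmd_len = cdb_len;
	memcpy(rq->cmd, cdb, cdb_len);

	blk_execute_rq(q, disk, rq, 0);
	if (rq->errors)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}
#endif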