blk-map.c

/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a request: start the request with this bio if it has
 * none yet, otherwise chain it onto the tail if the back-merge check allows.
 */
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}

/*
 * Undo a single mapped bio: unpin directly mapped user pages, or copy back
 * and release the bounce pages for copied mappings.
 */
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio *bio;
	int unaligned = 0;
	struct iov_iter i;
	struct iovec iov;

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	if (bio->bi_iter.bi_size != iter->count) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
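
/*
 * Illustrative sketch, not part of blk-map.c: one way a passthrough-style
 * caller might build an iov_iter from a user iovec array and hand it to
 * blk_rq_map_user_iov(). The helper name example_map_user_iovecs() and its
 * arguments are hypothetical; it assumes <linux/slab.h> for kfree() in
 * addition to the headers already included above.
 */
static int example_map_user_iovecs(struct request_queue *q, struct request *rq,
				   const struct iovec __user *uvec,
				   unsigned int nr_segs)
{
	struct iovec fast[UIO_FASTIOV], *iov = fast;
	struct iov_iter iter;
	int ret;

	/* Copy in and validate the user iovec array, then build an iterator. */
	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, UIO_FASTIOV,
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* Map (or bounce-copy) the described user pages into the request. */
	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);

	/* import_iovec() may have allocated a larger array; NULL is safe here. */
	kfree(iov);
	return ret;
}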
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
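
/*
 * Illustrative sketch, not part of blk-map.c: the typical map/execute/unmap
 * lifecycle of a REQ_TYPE_BLOCK_PC request, roughly as the SG_IO paths use
 * these helpers. The function name example_submit_pc() is hypothetical and
 * the CDB/sense setup a real submitter needs is omitted; blk_get_request(),
 * blk_execute_rq() and blk_put_request() are the existing block-layer calls.
 */
static int example_submit_pc(struct request_queue *q, struct gendisk *disk,
			     void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *orig_bio;
	int ret, unmap_ret;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* (CDB and sense buffer setup omitted for brevity.) */

	/* Zero-copy map when aligned, otherwise a bounce buffer is used. */
	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	/*
	 * Remember the original bio: completion may advance rq->bio, and
	 * blk_rq_unmap_user() must see the head of the mapped bio list.
	 */
	orig_bio = rq->bio;

	ret = blk_execute_rq(q, disk, rq, 0);

	/* Unmap (and copy back any bounced data) while still in process context. */
	unmap_ret = blk_rq_unmap_user(orig_bio);
	if (!ret)
		ret = unmap_ret;

	blk_put_request(rq);
	return ret;
}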
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
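
/*
 * Illustrative sketch, not part of blk-map.c: mapping a driver-owned kernel
 * buffer into an already-allocated request with blk_rq_map_kern(). The
 * helper name example_map_kernel_buffer() is hypothetical and it assumes
 * <linux/slab.h> for kmalloc()/kfree(). A heap allocation is used because a
 * stack buffer would be forced onto the copy path by the
 * object_is_on_stack() check above.
 */
static int example_map_kernel_buffer(struct request_queue *q,
				     struct request *rq, unsigned int len)
{
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Direct mapping if aligned; otherwise bio_copy_kern() bounces it. */
	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		return ret;
	}

	/* On success the caller frees buf only after the request completes. */
	return 0;
}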