/* scatterlist.h — Linux scatter/gather list definitions */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _LINUX_SCATTERLIST_H
  3. #define _LINUX_SCATTERLIST_H
  4. #include <linux/string.h>
  5. #include <linux/types.h>
  6. #include <linux/bug.h>
  7. #include <linux/mm.h>
  8. #include <asm/io.h>
  9. struct scatterlist {
  10. unsigned long page_link;
  11. unsigned int offset;
  12. unsigned int length;
  13. dma_addr_t dma_address;
  14. #ifdef CONFIG_NEED_SG_DMA_LENGTH
  15. unsigned int dma_length;
  16. #endif
  17. };
  18. /*
  19. * Since the above length field is an unsigned int, below we define the maximum
  20. * length in bytes that can be stored in one scatterlist entry.
  21. */
  22. #define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
  23. /*
  24. * These macros should be used after a dma_map_sg call has been done
  25. * to get bus addresses of each of the SG entries and their lengths.
  26. * You should only work with the number of sg entries dma_map_sg
  27. * returns, or alternatively stop on the first sg_dma_len(sg) which
  28. * is 0.
  29. */
  30. #define sg_dma_address(sg) ((sg)->dma_address)
  31. #ifdef CONFIG_NEED_SG_DMA_LENGTH
  32. #define sg_dma_len(sg) ((sg)->dma_length)
  33. #else
  34. #define sg_dma_len(sg) ((sg)->length)
  35. #endif
  36. struct sg_table {
  37. struct scatterlist *sgl; /* the list */
  38. unsigned int nents; /* number of mapped entries */
  39. unsigned int orig_nents; /* original size of list */
  40. };
  41. /*
  42. * Notes on SG table design.
  43. *
  44. * We use the unsigned long page_link field in the scatterlist struct to place
  45. * the page pointer AND encode information about the sg table as well. The two
  46. * lower bits are reserved for this information.
  47. *
  48. * If bit 0 is set, then the page_link contains a pointer to the next sg
  49. * table list. Otherwise the next entry is at sg + 1.
  50. *
  51. * If bit 1 is set, then this sg entry is the last element in a list.
  52. *
  53. * See sg_next().
  54. *
  55. */
  56. #define SG_CHAIN 0x01UL
  57. #define SG_END 0x02UL
  58. /*
  59. * We overload the LSB of the page pointer to indicate whether it's
  60. * a valid sg entry, or whether it points to the start of a new scatterlist.
  61. * Those low bits are there for everyone! (thanks mason :-)
  62. */
  63. #define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
  64. #define sg_is_last(sg) ((sg)->page_link & SG_END)
  65. #define sg_chain_ptr(sg) \
  66. ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
  67. /**
  68. * sg_assign_page - Assign a given page to an SG entry
  69. * @sg: SG entry
  70. * @page: The page
  71. *
  72. * Description:
  73. * Assign page to sg entry. Also see sg_set_page(), the most commonly used
  74. * variant.
  75. *
  76. **/
  77. static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
  78. {
  79. unsigned long page_link = sg->page_link & (SG_CHAIN | SG_END);
  80. /*
  81. * In order for the low bit stealing approach to work, pages
  82. * must be aligned at a 32-bit boundary as a minimum.
  83. */
  84. BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
  85. #ifdef CONFIG_DEBUG_SG
  86. BUG_ON(sg_is_chain(sg));
  87. #endif
  88. sg->page_link = page_link | (unsigned long) page;
  89. }
  90. /**
  91. * sg_set_page - Set sg entry to point at given page
  92. * @sg: SG entry
  93. * @page: The page
  94. * @len: Length of data
  95. * @offset: Offset into page
  96. *
  97. * Description:
  98. * Use this function to set an sg entry pointing at a page, never assign
  99. * the page directly. We encode sg table information in the lower bits
  100. * of the page pointer. See sg_page() for looking up the page belonging
  101. * to an sg entry.
  102. *
  103. **/
  104. static inline void sg_set_page(struct scatterlist *sg, struct page *page,
  105. unsigned int len, unsigned int offset)
  106. {
  107. sg_assign_page(sg, page);
  108. sg->offset = offset;
  109. sg->length = len;
  110. }
  111. static inline struct page *sg_page(struct scatterlist *sg)
  112. {
  113. #ifdef CONFIG_DEBUG_SG
  114. BUG_ON(sg_is_chain(sg));
  115. #endif
  116. return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
  117. }
  118. /**
  119. * sg_set_buf - Set sg entry to point at given data
  120. * @sg: SG entry
  121. * @buf: Data
  122. * @buflen: Data length
  123. *
  124. **/
  125. static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
  126. unsigned int buflen)
  127. {
  128. #ifdef CONFIG_DEBUG_SG
  129. BUG_ON(!virt_addr_valid(buf));
  130. #endif
  131. sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
  132. }
  133. /*
  134. * Loop over each sg element, following the pointer to a new list if necessary
  135. */
  136. #define for_each_sg(sglist, sg, nr, __i) \
  137. for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
  138. /**
  139. * sg_chain - Chain two sglists together
  140. * @prv: First scatterlist
  141. * @prv_nents: Number of entries in prv
  142. * @sgl: Second scatterlist
  143. *
  144. * Description:
  145. * Links @prv@ and @sgl@ together, to form a longer scatterlist.
  146. *
  147. **/
  148. static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
  149. struct scatterlist *sgl)
  150. {
  151. /*
  152. * offset and length are unused for chain entry. Clear them.
  153. */
  154. prv[prv_nents - 1].offset = 0;
  155. prv[prv_nents - 1].length = 0;
  156. /*
  157. * Set lowest bit to indicate a link pointer, and make sure to clear
  158. * the termination bit if it happens to be set.
  159. */
  160. prv[prv_nents - 1].page_link = ((unsigned long) sgl | SG_CHAIN)
  161. & ~SG_END;
  162. }
  163. /**
  164. * sg_mark_end - Mark the end of the scatterlist
  165. * @sg: SG entryScatterlist
  166. *
  167. * Description:
  168. * Marks the passed in sg entry as the termination point for the sg
  169. * table. A call to sg_next() on this entry will return NULL.
  170. *
  171. **/
  172. static inline void sg_mark_end(struct scatterlist *sg)
  173. {
  174. /*
  175. * Set termination bit, clear potential chain bit
  176. */
  177. sg->page_link |= SG_END;
  178. sg->page_link &= ~SG_CHAIN;
  179. }
  180. /**
  181. * sg_unmark_end - Undo setting the end of the scatterlist
  182. * @sg: SG entryScatterlist
  183. *
  184. * Description:
  185. * Removes the termination marker from the given entry of the scatterlist.
  186. *
  187. **/
  188. static inline void sg_unmark_end(struct scatterlist *sg)
  189. {
  190. sg->page_link &= ~SG_END;
  191. }
  192. /**
  193. * sg_phys - Return physical address of an sg entry
  194. * @sg: SG entry
  195. *
  196. * Description:
  197. * This calls page_to_phys() on the page in this sg entry, and adds the
  198. * sg offset. The caller must know that it is legal to call page_to_phys()
  199. * on the sg page.
  200. *
  201. **/
  202. static inline dma_addr_t sg_phys(struct scatterlist *sg)
  203. {
  204. return page_to_phys(sg_page(sg)) + sg->offset;
  205. }
  206. /**
  207. * sg_virt - Return virtual address of an sg entry
  208. * @sg: SG entry
  209. *
  210. * Description:
  211. * This calls page_address() on the page in this sg entry, and adds the
  212. * sg offset. The caller must know that the sg page has a valid virtual
  213. * mapping.
  214. *
  215. **/
  216. static inline void *sg_virt(struct scatterlist *sg)
  217. {
  218. return page_address(sg_page(sg)) + sg->offset;
  219. }
  220. /**
  221. * sg_init_marker - Initialize markers in sg table
  222. * @sgl: The SG table
  223. * @nents: Number of entries in table
  224. *
  225. **/
  226. static inline void sg_init_marker(struct scatterlist *sgl,
  227. unsigned int nents)
  228. {
  229. sg_mark_end(&sgl[nents - 1]);
  230. }
  231. int sg_nents(struct scatterlist *sg);
  232. int sg_nents_for_len(struct scatterlist *sg, u64 len);
  233. struct scatterlist *sg_next(struct scatterlist *);
  234. struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
  235. void sg_init_table(struct scatterlist *, unsigned int);
  236. void sg_init_one(struct scatterlist *, const void *, unsigned int);
  237. int sg_split(struct scatterlist *in, const int in_mapped_nents,
  238. const off_t skip, const int nb_splits,
  239. const size_t *split_sizes,
  240. struct scatterlist **out, int *out_mapped_nents,
  241. gfp_t gfp_mask);
  242. typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
  243. typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
  244. void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
  245. void sg_free_table(struct sg_table *);
  246. int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
  247. struct scatterlist *, gfp_t, sg_alloc_fn *);
  248. int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
  249. int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
  250. unsigned int n_pages, unsigned int offset,
  251. unsigned long size, unsigned int max_segment,
  252. gfp_t gfp_mask);
  253. int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
  254. unsigned int n_pages, unsigned int offset,
  255. unsigned long size, gfp_t gfp_mask);
  256. #ifdef CONFIG_SGL_ALLOC
  257. struct scatterlist *sgl_alloc_order(unsigned long long length,
  258. unsigned int order, bool chainable,
  259. gfp_t gfp, unsigned int *nent_p);
  260. struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
  261. unsigned int *nent_p);
  262. void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
  263. void sgl_free_order(struct scatterlist *sgl, int order);
  264. void sgl_free(struct scatterlist *sgl);
  265. #endif /* CONFIG_SGL_ALLOC */
  266. size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
  267. size_t buflen, off_t skip, bool to_buffer);
  268. size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
  269. const void *buf, size_t buflen);
  270. size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
  271. void *buf, size_t buflen);
  272. size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
  273. const void *buf, size_t buflen, off_t skip);
  274. size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
  275. void *buf, size_t buflen, off_t skip);
  276. size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
  277. size_t buflen, off_t skip);
  278. /*
  279. * Maximum number of entries that will be allocated in one piece, if
  280. * a list larger than this is required then chaining will be utilized.
  281. */
  282. #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
  283. /*
  284. * The maximum number of SG segments that we will put inside a
  285. * scatterlist (unless chaining is used). Should ideally fit inside a
  286. * single page, to avoid a higher order allocation. We could define this
  287. * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
  288. * minimum value is 32
  289. */
  290. #define SG_CHUNK_SIZE 128
  291. /*
  292. * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
  293. * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
  294. */
  295. #ifdef CONFIG_ARCH_HAS_SG_CHAIN
  296. #define SG_MAX_SEGMENTS 2048
  297. #else
  298. #define SG_MAX_SEGMENTS SG_CHUNK_SIZE
  299. #endif
  300. #ifdef CONFIG_SG_POOL
  301. void sg_free_table_chained(struct sg_table *table, bool first_chunk);
  302. int sg_alloc_table_chained(struct sg_table *table, int nents,
  303. struct scatterlist *first_chunk);
  304. #endif
  305. /*
  306. * sg page iterator
  307. *
  308. * Iterates over sg entries page-by-page. On each successful iteration,
  309. * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
  310. * to get the current page and its dma address. @piter->sg will point to the
  311. * sg holding this page and @piter->sg_pgoffset to the page's page offset
  312. * within the sg. The iteration will stop either when a maximum number of sg
  313. * entries was reached or a terminating sg (sg_last(sg) == true) was reached.
  314. */
  315. struct sg_page_iter {
  316. struct scatterlist *sg; /* sg holding the page */
  317. unsigned int sg_pgoffset; /* page offset within the sg */
  318. /* these are internal states, keep away */
  319. unsigned int __nents; /* remaining sg entries */
  320. int __pg_advance; /* nr pages to advance at the
  321. * next step */
  322. };
  323. bool __sg_page_iter_next(struct sg_page_iter *piter);
  324. void __sg_page_iter_start(struct sg_page_iter *piter,
  325. struct scatterlist *sglist, unsigned int nents,
  326. unsigned long pgoffset);
  327. /**
  328. * sg_page_iter_page - get the current page held by the page iterator
  329. * @piter: page iterator holding the page
  330. */
  331. static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
  332. {
  333. return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
  334. }
  335. /**
  336. * sg_page_iter_dma_address - get the dma address of the current page held by
  337. * the page iterator.
  338. * @piter: page iterator holding the page
  339. */
  340. static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
  341. {
  342. return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
  343. }
  344. /**
  345. * for_each_sg_page - iterate over the pages of the given sg list
  346. * @sglist: sglist to iterate over
  347. * @piter: page iterator to hold current page, sg, sg_pgoffset
  348. * @nents: maximum number of sg entries to iterate over
  349. * @pgoffset: starting page offset
  350. */
  351. #define for_each_sg_page(sglist, piter, nents, pgoffset) \
  352. for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
  353. __sg_page_iter_next(piter);)
  354. /*
  355. * Mapping sg iterator
  356. *
  357. * Iterates over sg entries mapping page-by-page. On each successful
  358. * iteration, @miter->page points to the mapped page and
  359. * @miter->length bytes of data can be accessed at @miter->addr. As
  360. * long as an interation is enclosed between start and stop, the user
  361. * is free to choose control structure and when to stop.
  362. *
  363. * @miter->consumed is set to @miter->length on each iteration. It
  364. * can be adjusted if the user can't consume all the bytes in one go.
  365. * Also, a stopped iteration can be resumed by calling next on it.
  366. * This is useful when iteration needs to release all resources and
  367. * continue later (e.g. at the next interrupt).
  368. */
  369. #define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
  370. #define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
  371. #define SG_MITER_FROM_SG (1 << 2) /* nop */
  372. struct sg_mapping_iter {
  373. /* the following three fields can be accessed directly */
  374. struct page *page; /* currently mapped page */
  375. void *addr; /* pointer to the mapped area */
  376. size_t length; /* length of the mapped area */
  377. size_t consumed; /* number of consumed bytes */
  378. struct sg_page_iter piter; /* page iterator */
  379. /* these are internal states, keep away */
  380. unsigned int __offset; /* offset within page */
  381. unsigned int __remaining; /* remaining bytes on page */
  382. unsigned int __flags;
  383. };
  384. void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
  385. unsigned int nents, unsigned int flags);
  386. bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset);
  387. bool sg_miter_next(struct sg_mapping_iter *miter);
  388. void sg_miter_stop(struct sg_mapping_iter *miter);
  389. #endif /* _LINUX_SCATTERLIST_H */