/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

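/*
 * Illustrative sketch, not part of the original file: two equivalent ways
 * of walking a scatterlist, manually via sg_next() and via the standard
 * for_each_sg() macro from <linux/scatterlist.h>. The function name and
 * the pr_info() output are hypothetical.
 */
static void __maybe_unused example_walk_sgl(struct scatterlist *sgl,
					    unsigned int nents)
{
	struct scatterlist *sg;
	unsigned int i;

	/* Manual walk: sg_next() transparently follows chain entries. */
	for (sg = sgl; sg; sg = sg_next(sg))
		pr_info("entry: length=%u\n", sg->length);

	/* The same walk using the standard iteration macro. */
	for_each_sg(sgl, sg, nents, i)
		pr_info("entry %u: length=%u\n", i, sg->length);
}
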
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 * Returns the number of entries in @sg, taking chaining into
 * account as well.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;

	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *		      needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 * Determines the number of entries in @sg that are required to meet
 * the supplied length, taking chaining into account as well.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);

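/*
 * Illustrative sketch, not part of the original file: describing a single
 * kmalloc'ed buffer with a one-entry scatterlist, e.g. before handing it to
 * a DMA mapping call. example_single_buf() and the 512-byte size are
 * hypothetical.
 */
static int __maybe_unused example_single_buf(void)
{
	struct scatterlist sg;
	void *buf = kmalloc(512, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* One entry covering the whole buffer; also marks it as the end. */
	sg_init_one(&sg, buf, 512);

	/* ... pass &sg with nents == 1 to dma_map_sg() or similar ... */

	kfree(buf);
	return 0;
}
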
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);

		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 * Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first scatterlist chunk to use, if preallocated (may be %NULL)
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);

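/*
 * Illustrative sketch, not part of the original file: allocating a table,
 * pointing each entry at one page, and releasing everything afterwards.
 * example_fill_table() and the pages/npages arguments are hypothetical.
 */
static int __maybe_unused example_fill_table(struct page **pages,
					     unsigned int npages)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, npages, GFP_KERNEL);
	if (ret)
		return ret;

	/* Describe one full page per entry. */
	for_each_sg(table.sgl, sg, table.orig_nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... use the table, e.g. dma_map_sg(dev, table.sgl, ...) ... */

	sg_free_table(&table);
	return 0;
}
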
/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *				 an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node up to the
 *   maximum size specified in @max_segment. A user may provide an offset at
 *   the start and a size of valid data in a buffer specified by the page
 *   array. The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at the start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);

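/*
 * Illustrative sketch, not part of the original file: building a table
 * straight from a page array, letting contiguous pages collapse into
 * single entries. example_table_from_pages() and the pages/npages/nbytes
 * arguments are hypothetical.
 */
static int __maybe_unused example_table_from_pages(struct page **pages,
						   unsigned int npages,
						   unsigned long nbytes)
{
	struct sg_table table;
	int ret;

	/* Buffer starts at offset 0 of the first page and is nbytes long. */
	ret = sg_alloc_table_from_pages(&table, pages, npages, 0,
					nbytes, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... map and use the table ... */

	sg_free_table(&table);
	return 0;
}
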
#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %llu\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set @nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

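/*
 * Illustrative sketch, not part of the original file (and only meaningful
 * inside this CONFIG_SGL_ALLOC block): letting sgl_alloc() provide both the
 * scatterlist and its backing pages. example_sgl_buffer() and the 1 MiB
 * size are hypothetical.
 */
static int __maybe_unused example_sgl_buffer(void)
{
	struct scatterlist *sgl;
	unsigned int nents;

	sgl = sgl_alloc(1ULL << 20, GFP_KERNEL, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... use sgl/nents, e.g. as a protocol data buffer ... */

	sgl_free(sgl);	/* frees the pages and the list itself */
	return 0;
}
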
#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);

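/*
 * Illustrative sketch, not part of the original file: visiting every page
 * covered by a scatterlist via the for_each_sg_page() macro from
 * <linux/scatterlist.h>, which is built on __sg_page_iter_start() and
 * __sg_page_iter_next(). example_count_pages() is hypothetical.
 */
static unsigned int __maybe_unused example_count_pages(struct scatterlist *sgl,
						       unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int npages = 0;

	for_each_sg_page(sgl, &piter, nents, 0) {
		struct page *page = sg_page_iter_page(&piter);

		/* ... do something per page ... */
		(void)page;
		npages++;
	}

	return npages;
}
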
/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* flags controlling the iteration
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;

		miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
		miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
		miter->__offset &= PAGE_SIZE - 1;
		miter->__remaining = sg->offset + sg->length -
				     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
				     miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter first.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if the skip succeeded. false if the end of the sg list is
 *   reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

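/*
 * Illustrative sketch, not part of the original file: the canonical
 * start/next/stop pattern for a mapping iterator, here summing the number
 * of bytes readable from the list. example_sum_bytes() is hypothetical.
 */
static size_t __maybe_unused example_sum_bytes(struct scatterlist *sgl,
					       unsigned int nents)
{
	struct sg_mapping_iter miter;
	size_t total = 0;

	sg_miter_start(&miter, sgl, nents,
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		/* miter.addr is mapped; miter.length bytes are valid. */
		total += miter.length;
	}
	sg_miter_stop(&miter);

	return total;
}
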
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 The linear buffer to copy to or from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

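/*
 * Illustrative sketch, not part of the original file: round-tripping a
 * small header through an SG list with the copy helpers.
 * example_copy_hdr() and the 64-byte header are hypothetical.
 */
static int __maybe_unused example_copy_hdr(struct scatterlist *sgl,
					   unsigned int nents)
{
	u8 hdr[64] = { 0xaa, 0xbb };

	/* Write the header into the start of the SG buffer ... */
	if (sg_copy_from_buffer(sgl, nents, hdr, sizeof(hdr)) != sizeof(hdr))
		return -EIO;

	/* ... and read it back out again. */
	if (sg_copy_to_buffer(sgl, nents, hdr, sizeof(hdr)) != sizeof(hdr))
		return -EIO;

	return 0;
}
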
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out part of an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buflen:		 The number of bytes to zero out
 * @skip:		 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);
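
/*
 * Illustrative sketch, not part of the original file: zeroing a payload
 * region while leaving a leading header intact, using the skip-based
 * helper. example_clear_payload() and the 64-byte header size are
 * hypothetical.
 */
static size_t __maybe_unused example_clear_payload(struct scatterlist *sgl,
						   unsigned int nents,
						   size_t payload_len)
{
	/* Skip the first 64 bytes (the header), zero payload_len bytes. */
	return sg_zero_buffer(sgl, nents, payload_len, 64);
}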