dma-mapping-common.h

#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

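/*
 * Example (illustrative sketch, not part of the original header): a
 * driver streaming a CPU buffer to a device could pair the two helpers
 * above as follows.  "buf" and "len" are hypothetical driver state;
 * dma_mapping_error() comes from the architecture's dma-mapping.h.
 *
 *	dma_addr_t dma_handle;
 *
 *	dma_handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, NULL);
 *	if (dma_mapping_error(dev, dma_handle))
 *		return -ENOMEM;
 *
 *	... program the device with dma_handle and await completion ...
 *
 *	dma_unmap_single_attrs(dev, dma_handle, len, DMA_TO_DEVICE, NULL);
 *
 * Most drivers use the dma_map_single()/dma_unmap_single() wrappers
 * defined later in this file, which pass NULL attrs.
 */
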
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

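/*
 * Example (illustrative sketch): mapping a scatterlist for device
 * writes.  map_sg may coalesce entries, so the device is programmed
 * with the returned count, while dma_unmap_sg_attrs() must be called
 * with the original nents.  "sglist", "nents" and program_hw_desc()
 * are hypothetical stand-ins for driver state and descriptor setup.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg_attrs(dev, sglist, nents, DMA_FROM_DEVICE, NULL);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		program_hw_desc(i, sg_dma_address(s), sg_dma_len(s));
 *
 *	... after the transfer completes ...
 *
 *	dma_unmap_sg_attrs(dev, sglist, nents, DMA_FROM_DEVICE, NULL);
 */
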
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

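/*
 * Example (illustrative sketch): dma_map_page() suits drivers that hold
 * a struct page (e.g. one taken from the page cache or a page pool)
 * rather than a kernel virtual address.
 *
 *	dma_addr_t dma_handle;
 *
 *	dma_handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma_handle))
 *		return -ENOMEM;
 *
 *	... transfer ...
 *
 *	dma_unmap_page(dev, dma_handle, PAGE_SIZE, DMA_TO_DEVICE);
 */
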
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

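/*
 * Example (illustrative sketch): the sync helpers hand ownership of a
 * streaming mapping back and forth between CPU and device without
 * unmapping it, e.g. to inspect received data before reusing a buffer.
 *
 *	dma_sync_single_for_cpu(dev, dma_handle, len, DMA_FROM_DEVICE);
 *
 *	... the CPU may now read the buffer contents ...
 *
 *	dma_sync_single_for_device(dev, dma_handle, len, DMA_FROM_DEVICE);
 *
 * The _range variants do the same for a sub-range, e.g. syncing only a
 * packet header at a given offset into the mapping.
 */
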
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

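/*
 * Example (illustrative sketch): the scatter-gather sync variants
 * mirror the single-buffer case.  Note they take the same nelems that
 * was passed to dma_map_sg_attrs(), not the coalesced count it
 * returned.
 *
 *	dma_sync_sg_for_cpu(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 *	... CPU inspects the buffers ...
 *
 *	dma_sync_sg_for_device(dev, sglist, nents, DMA_FROM_DEVICE);
 */
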
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

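/*
 * Example (illustrative sketch): a driver typically calls
 * dma_mmap_coherent() from its .mmap file operation.  "mydrv_priv" is a
 * hypothetical per-device structure holding the results of an earlier
 * dma_alloc_coherent() call.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_handle, priv->size);
 *	}
 */
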
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

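/*
 * Example (illustrative sketch): building an sg_table that describes a
 * coherent allocation, as a dma-buf exporter might do before handing
 * the buffer to another device.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
 *	if (ret)
 *		return ret;
 *
 *	... use sgt, then release it with sg_free_table(&sgt) ...
 */
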
#endif