dma-mapping-nommu.c

/*
 * Based on linux/arch/arm/mm/dma-mapping.c
 *
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 * dma_direct_ops is used if
 *  - MMU/MPU is off
 *  - cpu is v7m w/o cache support
 *  - device is coherent
 * otherwise arm_nommu_dma_ops is used.
 *
 * arm_nommu_dma_ops relies on consistent DMA memory (see [1] for how to
 * declare such memory).
 *
 * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */
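
/*
 * Purely illustrative (not part of the original file): the consistent
 * memory mentioned in [1] is usually declared through a reserved-memory
 * node in the device tree. The node name, address and size below are
 * made up; "shared-dma-pool", "no-map", "linux,dma-default" and
 * "memory-region" are properties defined by the binding in [1].
 *
 *      reserved-memory {
 *              #address-cells = <1>;
 *              #size-cells = <1>;
 *              ranges;
 *
 *              coherent_dma: pool@60000000 {
 *                      compatible = "shared-dma-pool";
 *                      reg = <0x60000000 0x100000>;
 *                      no-map;
 *                      linux,dma-default;
 *              };
 *      };
 *
 * A device can reference the pool with "memory-region = <&coherent_dma>;",
 * while "linux,dma-default" makes the pool back the global coherent
 * allocator used by dma_alloc_from_global_coherent() below.
 */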

static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp,
                unsigned long attrs)
{
        void *ret;

        /*
         * Try the generic allocator first if the caller has advertised
         * that consistency is not required.
         */
        if (attrs & DMA_ATTR_NON_CONSISTENT)
                return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);

        ret = dma_alloc_from_global_coherent(size, dma_handle);

        /*
         * dma_alloc_from_global_coherent() may fail because:
         *
         * - no consistent DMA region has been defined, so we can't
         *   continue;
         * - there is no space left in the consistent DMA region, and we
         *   can only fall back to the generic allocator if the caller
         *   has advertised that consistency is not required.
         */

        WARN_ON_ONCE(ret == NULL);
        return ret;
}
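
/*
 * Purely illustrative: drivers do not call arm_nommu_dma_alloc() directly;
 * they reach it through the generic DMA API. A minimal sketch (buffer size
 * and error handling are just an example):
 *
 *      void *cpu;
 *      dma_addr_t dma;
 *
 *      cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *      if (!cpu)
 *              return -ENOMEM;
 *      // ... use the buffer ...
 *      dma_free_coherent(dev, SZ_4K, cpu, dma);
 *
 * dma_alloc_coherent() looks up the device's dma_map_ops (installed by
 * arch_setup_dma_ops() at the bottom of this file) and calls .alloc.
 */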

static void arm_nommu_dma_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr,
                unsigned long attrs)
{
        if (attrs & DMA_ATTR_NON_CONSISTENT) {
                dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
        } else {
                int ret = dma_release_from_global_coherent(get_order(size),
                                                           cpu_addr);

                WARN_ON_ONCE(ret == 0);
        }
}

static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        int ret;

        if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
                return ret;

        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
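
/*
 * Purely illustrative: a driver would normally hook this up from its own
 * mmap file operation through the generic helper. foo_dev, foo_mmap and
 * the field names are hypothetical; dma_mmap_coherent() is the real DMA
 * API entry point that ends up in .mmap above:
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_dev *fd = file->private_data;
 *
 *              return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
 *                                       fd->dma_addr, fd->size);
 *      }
 */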

/*
 * CPU-side cache maintenance before the device accesses the buffer
 * (ownership passes from the CPU to the device).
 */
static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        dmac_map_area(__va(paddr), size, dir);

        if (dir == DMA_FROM_DEVICE)
                outer_inv_range(paddr, paddr + size);
        else
                outer_clean_range(paddr, paddr + size);
}

/*
 * CPU-side cache maintenance after the device has finished with the
 * buffer (ownership returns to the CPU). Nothing needs to be done for
 * DMA_TO_DEVICE: the device only read the buffer, so the CPU caches
 * are still up to date.
 */
static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        if (dir != DMA_TO_DEVICE) {
                outer_inv_range(paddr, paddr + size);
                dmac_unmap_area(__va(paddr), size, dir);
        }
}

static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction dir,
                unsigned long attrs)
{
        dma_addr_t handle = page_to_phys(page) + offset;

        __dma_page_cpu_to_dev(handle, size, dir);

        return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        __dma_page_dev_to_cpu(handle, size, dir);
}
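
/*
 * Purely illustrative: the streaming DMA API reaches .map_page/.unmap_page
 * above through helpers such as dma_map_single(). A sketch for a transmit
 * buffer (buf, len and the error path are just an example):
 *
 *      dma_addr_t dma;
 *
 *      dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      // ... start the transfer and wait for completion ...
 *      dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *
 * On !MMU the returned handle is simply the physical address of the
 * buffer; map/unmap exist to perform the cache maintenance above.
 */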

static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                sg_dma_address(sg) = sg_phys(sg);
                sg_dma_len(sg) = sg->length;
                __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
        }

        return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}
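
/*
 * Purely illustrative: a caller builds and maps a scatterlist roughly as
 * follows (two hypothetical buffers, no error handling beyond the map):
 *
 *      struct scatterlist sgl[2];
 *      int mapped;
 *
 *      sg_init_table(sgl, 2);
 *      sg_set_buf(&sgl[0], buf0, len0);
 *      sg_set_buf(&sgl[1], buf1, len1);
 *
 *      mapped = dma_map_sg(dev, sgl, 2, DMA_FROM_DEVICE);
 *      if (!mapped)
 *              return -ENOMEM;
 *      // ... program the device using sg_dma_address()/sg_dma_len() ...
 *      dma_unmap_sg(dev, sgl, 2, DMA_FROM_DEVICE);
 */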

static void arm_nommu_dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        __dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        __dma_page_dev_to_cpu(handle, size, dir);
}

static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}
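
/*
 * Purely illustrative: the sync callbacks matter when a streaming mapping
 * stays alive and is reused, e.g. a receive buffer that the CPU inspects
 * between transfers:
 *
 *      dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *      // ... CPU reads the freshly received data ...
 *      dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *      // ... hand the buffer back to the device for the next transfer ...
 */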

const struct dma_map_ops arm_nommu_dma_ops = {
        .alloc = arm_nommu_dma_alloc,
        .free = arm_nommu_dma_free,
        .mmap = arm_nommu_dma_mmap,
        .map_page = arm_nommu_dma_map_page,
        .unmap_page = arm_nommu_dma_unmap_page,
        .map_sg = arm_nommu_dma_map_sg,
        .unmap_sg = arm_nommu_dma_unmap_sg,
        .sync_single_for_device = arm_nommu_dma_sync_single_for_device,
        .sync_single_for_cpu = arm_nommu_dma_sync_single_for_cpu,
        .sync_sg_for_device = arm_nommu_dma_sync_sg_for_device,
        .sync_sg_for_cpu = arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
{
        return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                const struct iommu_ops *iommu, bool coherent)
{
        const struct dma_map_ops *dma_ops;

        if (IS_ENABLED(CONFIG_CPU_V7M)) {
                /*
                 * Cache support for v7m is optional, so the device can be
                 * treated as coherent if no cache has been detected. Note
                 * that it is not enough to check whether the MPU is in use,
                 * since in the absence of an MPU the system memory map is
                 * used.
                 */
                dev->archdata.dma_coherent = (cacheid) ? coherent : true;
        } else {
                /*
                 * Assume coherent DMA in case MMU/MPU has not been set up.
                 */
                dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
        }

        dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);

        set_dma_ops(dev, dma_ops);
}
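
/*
 * Purely illustrative: arch_setup_dma_ops() is not called by drivers; bus
 * code invokes it while configuring a DT-described device, roughly along
 * the lines of
 *
 *      coherent = of_dma_is_coherent(np);      // "dma-coherent" property
 *      arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
 *
 * This is a simplified sketch of what of_dma_configure() does; np,
 * dma_addr, size and iommu come from the device's DT node, dma-ranges
 * and IOMMU configuration.
 */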

void arch_teardown_dma_ops(struct device *dev)
{
}