dma.c

/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

#define NOT_COHERENT_CACHE
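/*
 * MicroBlaze caches are not coherent with respect to DMA, so coherent
 * allocations must be backed by an uncached mapping; consistent_alloc()
 * below provides exactly that.  The !NOT_COHERENT_CACHE branches are
 * kept for configurations whose caches are DMA-coherent.
 */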
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}
static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
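/*
 * Map a scatter/gather list for DMA.  Each entry is mapped 1:1 to its
 * physical address, and the cache is synchronized for the transfer
 * direction before the device is allowed to touch the buffers.
 */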
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
			   sg->length, direction);
	}

	return nents;
}
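/*
 * With a 1:1 physical mapping and no IOMMU there is nothing to narrow
 * down, so any DMA mask the device requests is accepted.
 */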
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}
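/*
 * Streaming page mappings are likewise 1:1: the DMA address is simply
 * the physical address of the page plus the offset, after the cache has
 * been synchronized for the transfer.
 */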
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No cache cleanup beyond this sync is necessary.  Note that
	 * dma_address is already a physical address, which is exactly
	 * what __dma_sync() expects, so no phys_to_virt() conversion
	 * is needed here.
	 */
	__dma_sync(dma_address, size, direction);
}
static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * Flushing (writing back) the cache is pointless when the buffer
	 * is handed back to the CPU; an invalidate is only needed if the
	 * device may have written to the buffer (DMA_FROM_DEVICE).
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region.
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of the code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of the code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
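/*
 * Map a coherent buffer into user space.  vma->vm_pgoff may select a
 * sub-range of the allocation; requests extending past the end of the
 * buffer are rejected.  On non-coherent configurations the user-space
 * mapping must be uncached as well, hence pgprot_noncached().
 */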
static
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
struct dma_map_ops dma_direct_ops = {
	.alloc = dma_direct_alloc_coherent,
	.free = dma_direct_free_coherent,
	.mmap = dma_direct_mmap_coherent,
	.map_sg = dma_direct_map_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
	.sync_single_for_device = dma_direct_sync_single_for_device,
	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
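/*
 * Illustrative sketch only: drivers never call dma_direct_ops directly.
 * They go through the generic DMA API, which dispatches to these ops on
 * this platform.  A hypothetical driver ("dev" being its struct device)
 * might do:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... let the device DMA to/from 'handle' ...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 *
 * or, for a streaming mapping of an existing buffer:
 *
 *	dma_addr_t addr = dma_map_single(dev, ptr, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -EIO;
 *	... start the transfer ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */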
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);