dma.c

/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);

	/*
	 * __GFP_HIGHMEM flag is cleared by upper layer functions
	 * (in include/linux/dma-mapping.h) so we should never get a
	 * __GFP_HIGHMEM here.
	 */
	BUG_ON(gfp & __GFP_HIGHMEM);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/*
	 * A coherent buffer needs an MMU mapping to enforce non-cacheability.
	 * kvaddr is the kernel virtual address (0x7000_0000 based).
	 */
	if (need_coh) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr.
	 * Currently flush_cache_vmap() nukes the L1 cache completely, which
	 * will be optimized in a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

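/*
 * Illustrative only (not part of this file's logic): callers reach the
 * hook above through the generic allocator, e.g.
 *
 *	void *va = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
 *				   DMA_ATTR_NON_CONSISTENT);
 *
 * With DMA_ATTR_NON_CONSISTENT set, the ioremap_nocache() mapping is
 * skipped and the caller owns cache maintenance via the dma_sync_* API;
 * without it, the buffer comes back uncached, dma_alloc_coherent() style.
 */
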
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);

	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(dma_addr);
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

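/*
 * Illustrative only: a driver would normally reach arch_dma_mmap() from
 * its own mmap handler via the generic wrapper, e.g.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(foo->dev, vma, foo->kvaddr,
 *					 foo->handle, foo->size);
 *	}
 *
 * foo_mmap() and the foo->* fields are hypothetical driver state, shown
 * only to illustrate the call path.
 */
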
/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 *
 * (An illustrative call sequence follows arch_sync_dma_for_cpu() below.)
 */

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed only against speculative CPU prefetch */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

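/*
 * Illustrative only: the two sync hooks above are driven by the generic
 * streaming DMA API, matching the direction table further up, e.g.
 *
 *	dma_addr_t h = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *						(for_device: invalidate)
 *	... device writes into buf ...
 *	dma_sync_single_for_cpu(dev, h, len, DMA_FROM_DEVICE);
 *						(for_cpu: invalidate, to drop
 *						 speculatively prefetched lines)
 *	dma_unmap_single(dev, h, len, DMA_FROM_DEVICE);
 */
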
/*
 * Plug in coherent or noncoherent dma ops
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic, keeping the caches consistent
	 * with memory and eliding the need for any explicit cache maintenance
	 * of DMA buffers - so we can use dma_direct cache ops.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent) {
		set_dma_ops(dev, &dma_direct_ops);
		dev_info(dev, "use dma_direct_ops cache ops\n");
	} else {
		set_dma_ops(dev, &dma_noncoherent_ops);
		dev_info(dev, "use dma_noncoherent_ops cache ops\n");
	}
}
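
/*
 * Illustrative only: whether @coherent is true here is normally derived
 * from the standard "dma-coherent" DT property, e.g. a hypothetical node
 * (address and layout are examples, not from this file):
 *
 *	ethernet@f0000000 {
 *		compatible = "snps,arc-emac";
 *		dma-coherent;
 *	};
 *
 * On ARCv2 with IOC enabled, such a device gets dma_direct_ops; all
 * other devices fall back to dma_noncoherent_ops.
 */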