/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-noncoherent.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
                 unsigned long next, struct mm_walk *walk)
{
        unsigned long cl;
        struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

        pte_val(*pte) |= _PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        /* Flush page out of dcache */
        for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
                mtspr(SPR_DCBFR, cl);

        return 0;
}

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
                   unsigned long next, struct mm_walk *walk)
{
        pte_val(*pte) &= ~_PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        return 0;
}
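
/*
 * Note: both helpers above are pte_entry callbacks for walk_page_range();
 * the page-table walker invokes them once per PTE, with [addr, next)
 * bounding the range that entry covers, and a non-zero return value
 * aborts the walk.  They are hooked up via the mm_walk structures built
 * in arch_dma_alloc() and arch_dma_free() below.
 */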

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cacheable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
               gfp_t gfp, unsigned long attrs)
{
        unsigned long va;
        void *page;
        struct mm_walk walk = {
                .pte_entry = page_set_nocache,
                .mm = &init_mm
        };

        page = alloc_pages_exact(size, gfp);
        if (!page)
                return NULL;

        /* This gives us the real physical address of the first page. */
        *dma_handle = __pa(page);

        va = (unsigned long)page;

        if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
                /*
                 * We need to iterate through the pages, clearing the dcache for
                 * them and setting the cache-inhibit bit.
                 */
                if (walk_page_range(va, va + size, &walk)) {
                        free_pages_exact(page, size);
                        return NULL;
                }
        }

        return (void *)va;
}

void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
              dma_addr_t dma_handle, unsigned long attrs)
{
        unsigned long va = (unsigned long)vaddr;
        struct mm_walk walk = {
                .pte_entry = page_clear_nocache,
                .mm = &init_mm
        };

        if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
                /* walk_page_range shouldn't be able to fail here */
                WARN_ON(walk_page_range(va, va + size, &walk));
        }

        free_pages_exact(vaddr, size);
}
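
/*
 * Usage sketch (illustrative, not part of this file): drivers do not call
 * arch_dma_alloc()/arch_dma_free() directly; they go through the generic
 * DMA API, which lands in the hooks above on OpenRISC.  Assuming a
 * hypothetical driver that already holds a struct device *dev:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... program the device with 'handle'; the CPU uses 'buf',
 *	    which is now cache-inhibited ...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */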

void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
                              enum dma_data_direction dir)
{
        unsigned long cl;
        struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

        switch (dir) {
        case DMA_TO_DEVICE:
                /* Flush the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo->dcache_block_size)
                        mtspr(SPR_DCBFR, cl);
                break;
        case DMA_FROM_DEVICE:
                /* Invalidate the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo->dcache_block_size)
                        mtspr(SPR_DCBIR, cl);
                break;
        default:
                /*
                 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
                 * flush nor invalidate the cache here as the area will need
                 * to be manually synced anyway.
                 */
                break;
        }
}
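
/*
 * Usage sketch (illustrative, not part of this file): the streaming DMA
 * API is what ends up in arch_sync_dma_for_device() above.  Assuming a
 * hypothetical driver transmitting a buffer it has just written:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the device transfer using 'handle' ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * DMA_TO_DEVICE takes the SPR_DCBFR flush loop above so the device reads
 * current data; DMA_FROM_DEVICE would take the SPR_DCBIR invalidate path
 * instead, so the CPU does not later read stale cache lines.
 */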