dvma.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual DMA allocation
 *
 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * 11/26/2000 -- disabled the existing code because it didn't work for
 * me in 2.4. Replaced with a significantly more primitive version
 * similar to the sun3 code. The old functionality was probably more
 * desirable, but.... -- Sam Creasey (sammy@oh.verio.com)
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>

#include <asm/sun3x.h>
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
/* IOMMU support */

#define IOMMU_ADDR_MASK		0x03ffe000
#define IOMMU_CACHE_INHIBIT	0x00000040
#define IOMMU_FULL_BLOCK	0x00000020
#define IOMMU_MODIFIED		0x00000010
#define IOMMU_USED		0x00000008
#define IOMMU_WRITE_PROTECT	0x00000004
#define IOMMU_DT_MASK		0x00000003
#define IOMMU_DT_INVALID	0x00000000
#define IOMMU_DT_VALID		0x00000001
#define IOMMU_DT_BAD		0x00000002
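
/*
 * Each IOMMU page table entry carries the target physical page in the
 * bits covered by IOMMU_ADDR_MASK, a set of status/control flags, and a
 * two-bit descriptor type (IOMMU_DT_*) in the low bits marking the entry
 * invalid, valid, or bad.
 */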
/* the IOMMU page table sits at a fixed address on sun3x hardware */
static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;
#define dvma_entry_paddr(index)		(iommu_pte[index] & IOMMU_ADDR_MASK)
#define dvma_entry_vaddr(index,paddr)	((index << DVMA_PAGE_SHIFT) | \
					 (paddr & (DVMA_PAGE_SIZE-1)))
#if 0
#define dvma_entry_set(index,addr)	(iommu_pte[index] = \
					 (addr & IOMMU_ADDR_MASK) | \
					 IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
#else
#define dvma_entry_set(index,addr)	(iommu_pte[index] = \
					 (addr & IOMMU_ADDR_MASK) | \
					 IOMMU_DT_VALID)
#endif
#define dvma_entry_clr(index)		(iommu_pte[index] = IOMMU_DT_INVALID)
#define dvma_entry_hash(addr)		((addr >> DVMA_PAGE_SHIFT) ^ \
					 ((addr & 0x03c00000) >> \
					  (DVMA_PAGE_SHIFT+4)))
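
/*
 * Note: dvma_entry_hash is not used by the simplified code below; it
 * appears to be left over from the original allocator that the header
 * comment says was disabled.
 */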
#ifdef DEBUG
/* code to print out a dvma mapping for debugging purposes */
void dvma_print (unsigned long dvma_addr)
{
	unsigned long index;

	index = dvma_addr >> DVMA_PAGE_SHIFT;

	pr_info("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
		dvma_entry_paddr(index));
}
#endif
/* create a virtual mapping for a page assigned within the IOMMU
   so that the cpu can reach it easily */
inline int dvma_map_cpu(unsigned long kaddr,
			unsigned long vaddr, int len)
{
	pgd_t *pgd;
	unsigned long end;
	int ret = 0;

	kaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;

	end = PAGE_ALIGN(vaddr + len);

	pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
	pgd = pgd_offset_k(vaddr);

	/* walk the kernel page tables, allocating pmd and pte levels as
	   needed; each inner loop is clamped to the next higher-level
	   table boundary */
	do {
		pmd_t *pmd;
		unsigned long end2;

		if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
			end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
		else
			end2 = end;

		do {
			pte_t *pte;
			unsigned long end3;

			if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
				ret = -ENOMEM;
				goto out;
			}

			if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
				end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
			else
				end3 = end2;

			do {
				pr_debug("mapping %08lx phys to %08lx\n",
					 __pa(kaddr), vaddr);
				set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
						     PAGE_KERNEL));
				pte++;
				kaddr += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			} while(vaddr < end3);

		} while(vaddr < end2);

	} while(vaddr < end);

	flush_tlb_all();

out:
	return ret;
}
inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
			  int len)
{
	unsigned long end, index;

	index = baddr >> DVMA_PAGE_SHIFT;
	end = ((baddr+len) >> DVMA_PAGE_SHIFT);

	/* one more entry if len is not a whole number of DVMA pages */
	if(len & ~DVMA_PAGE_MASK)
		end++;

	for(; index < end ; index++) {
//		if(dvma_entry_use(index))
//			BUG();
//		pr_info("mapping pa %lx to ba %lx\n", __pa(kaddr),
//			index << DVMA_PAGE_SHIFT);

		/* point this IOMMU entry at the next physical page */
		dvma_entry_set(index, __pa(kaddr));

		iommu_pte[index] |= IOMMU_FULL_BLOCK;

//		dvma_entry_inc(index);

		kaddr += DVMA_PAGE_SIZE;
	}

#ifdef DEBUG
	for(index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
		dvma_print(index << DVMA_PAGE_SHIFT);
#endif
	return 0;
}
void dvma_unmap_iommu(unsigned long baddr, int len)
{
	int index, end;

	index = baddr >> DVMA_PAGE_SHIFT;
	end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);

	for(; index < end ; index++) {
		pr_debug("freeing bus mapping %08x\n",
			 index << DVMA_PAGE_SHIFT);
#if 0
		if(!dvma_entry_use(index))
			pr_info("dvma_unmap freeing unused entry %04x\n",
				index);
		else
			dvma_entry_dec(index);
#endif
		dvma_entry_clr(index);
	}
}
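
#if 0
/*
 * Illustrative sketch only, hence the #if 0: a hypothetical caller that
 * pairs the two mapping halves above. In the kernel proper the bus
 * address is chosen by the shared sun3 DVMA allocator
 * (arch/m68k/sun3/sun3dvma.c); example_dvma_map and its baddr argument
 * are invented here for illustration.
 */
static void example_dvma_map(unsigned long kaddr, unsigned long baddr, int len)
{
	/* program the IOMMU so the device sees kaddr's pages at baddr */
	dvma_map_iommu(kaddr, baddr, len);

	/* give the CPU a matching kernel mapping of the same pages;
	   dvma_btov() is the bus-to-virtual macro from <asm/dvma.h> */
	dvma_map_cpu(kaddr, dvma_btov(baddr), len);

	/* ... device DMA to/from baddr would happen here ... */

	dvma_unmap_iommu(baddr, len);
}
#endif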