ioremap.c

/*
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 * Copyright (C) 2004 Microtronix Datacom Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
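
/*
 * ioremap support for Nios II: builds kernel page-table entries mapping
 * physical MMIO ranges into the vmalloc area, with a fast path that maps
 * low physical addresses straight into the fixed uncached I/O region.
 */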

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
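
/*
 * Fill one PMD's worth of PTEs, mapping the virtual range
 * [address, address + size) to the physical range starting at phys_addr.
 * Every entry is marked global, present, readable and writable, plus any
 * caller-supplied flags; an already-populated PTE is a bug.
 */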
static inline void remap_area_pte(pte_t *pte, unsigned long address,
				  unsigned long size, unsigned long phys_addr,
				  unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
				   | _PAGE_WRITE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = PFN_DOWN(phys_addr);
	do {
		if (!pte_none(*pte)) {
			pr_err("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
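
/*
 * Populate the PMD entries covering [address, address + size) within a
 * single PGD entry: allocate a PTE table for each PMD slot and hand each
 * PMD-sized piece to remap_area_pte().
 */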
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				 unsigned long size, unsigned long phys_addr,
				 unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr,
			       flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
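
/*
 * Top-level walk over the kernel page tables: for each PGD entry spanned
 * by the request, allocate the intermediate PUD/PMD levels, fill in the
 * PTEs, then flush the TLB so the new mappings take effect.
 */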
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
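
/*
 * Physical addresses below 0x20000000 fit within the fixed uncached I/O
 * region and can be reached through it directly, without building any
 * page tables (see the fast path in __ioremap() below).
 */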
#define IS_MAPPABLE_UNCACHEABLE(addr) (addr < 0x20000000UL)

/*
 * Map some physical address range into the kernel address space.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long cacheflag)
{
	struct vm_struct *area;
	unsigned long offset;
	unsigned long last_addr;
	void *addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Don't allow anybody to remap normal RAM that we're using */
	if (phys_addr > PHYS_OFFSET && phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);
		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Map uncached objects in the low part of address space to
	 * CONFIG_NIOS2_IO_REGION_BASE
	 */
	if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
	    IS_MAPPABLE_UNCACHEABLE(last_addr) &&
	    !(cacheflag & _PAGE_CACHED))
		return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);

	/* Mappings have to be page-aligned */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/* Ok, go for it */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size,
			     cacheflag)) {
		vunmap(addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);
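
/*
 * Illustrative sketch, not part of the original file: a driver would
 * normally reach this code through the generic ioremap()/iounmap()
 * wrappers rather than calling __ioremap() directly. The base address,
 * size and REG_STATUS offset below are hypothetical.
 *
 *	void __iomem *regs = ioremap(0x10000000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	u32 status = readl(regs + REG_STATUS);
 *	iounmap(regs);
 */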

/*
 * __iounmap unmaps nearly everything, so be careful.
 * Currently it does not free the intermediate page tables; that was never
 * needed here and might be added later.
 */
void __iounmap(void __iomem *addr)
{
	struct vm_struct *p;

	/* Addresses in the fixed uncached I/O region were never vmapped */
	if ((unsigned long) addr > CONFIG_NIOS2_IO_REGION_BASE)
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		pr_err("iounmap: bad address %p\n", addr);
	kfree(p);
}
EXPORT_SYMBOL(__iounmap);