/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
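
/*
 * The three helpers below populate the kernel page tables for one
 * physical range: remap_area_pages() walks PGD slots (allocating
 * intermediate levels as needed), remap_area_pmd() covers a single PGD
 * slot one PMD at a time, and remap_area_pte() fills in the individual
 * PTEs.  Caches are flushed before the tables are touched and the TLB
 * once afterwards.
 */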
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
				   | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
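
/*
 * remap_area_pmd() biases phys_addr by -address up front so that
 * "address + phys_addr" inside the loop always names the physical
 * address corresponding to the current virtual address;
 * remap_area_pages() plays the same trick at PGD level.
 */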
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
	phys_addr_t size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
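
/*
 * Worked example of the alignment fixup below (hypothetical numbers,
 * assuming 4 KB pages): for phys_addr = 0x41000804 and size = 8,
 * last_addr = 0x4100080b, so offset = 0x804, phys_addr is rounded down
 * to 0x41000000 and size grows to PAGE_ALIGN(0x4100080c) - 0x41000000
 * = 0x1000.  The caller gets area->addr + 0x804 back and never sees
 * the realignment.
 */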

#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

void __iomem *__ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset;
	phys_addr_t last_addr;
	void *addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)addr);
}
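
/*
 * Addresses handed out by the KSEG1 shortcut in __ioremap() were never
 * entered into the page tables, so there is nothing to tear down for
 * them; __iounmap() simply ignores KSEG1 addresses and only unmaps
 * page-table-backed mappings.
 */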
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
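
#if 0
/*
 * Usage sketch, not part of this file: how a driver would reach these
 * entry points through the generic ioremap()/iounmap() wrappers (which
 * on MIPS request an uncached mapping).  The device base address and
 * the register offset are made-up values for illustration only.
 */
static u32 example_read_device_id(void)
{
	void __iomem *regs;
	u32 id;

	/* 4 KB of uncached MMIO; a low address like this comes straight
	 * from KSEG1 without touching the page tables. */
	regs = ioremap(0x1f000000, 0x1000);
	if (!regs)
		return 0;

	id = readl(regs + 0x08);	/* hypothetical ID register */
	iounmap(regs);			/* a no-op for KSEG1 addresses */
	return id;
}
#endif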