// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
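
/*
 * Editor's note: addresses inside the kernel vmalloc/kmap window are
 * resolved through init_mm rather than the faulting task's mm (see
 * cf_tlb_miss() below).
 */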
#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))
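
/*
 * Editor's note: context (ASID) bookkeeping, following the arch/ppc
 * scheme this file credits: a bitmap of allocated contexts, the next
 * context to hand out, a free count, and a context -> mm back-map used
 * when stealing a context.
 */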
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) alloc_bootmem_pages(size);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
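
	/*
	 * Editor's note: build the kernel's linear mapping of RAM at
	 * PAGE_OFFSET; each pgd slot gets a page table carved from the
	 * bootmem block above, filled with PAGE_INIT ptes, and entries
	 * that would point past high_memory are zeroed.
	 */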
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);

			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}
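
/*
 * Editor's note: software TLB miss handler. Walks the page tables for
 * the faulting address by hand and, if a valid pte exists, loads it
 * into the hardware TLB via the MMUTR/MMUDR/MMUOR registers. Returns 0
 * once an entry has been loaded, or -1 so the caller (presumably the
 * low-level exception entry code, which lives outside this file) can
 * fall back to the normal fault path.
 */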
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);
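
	/*
	 * Editor's note: a data-TLB miss leaves the faulting address in
	 * MMUAR; for an instruction miss the address is reconstructed
	 * from the PC plus the extension-word offset passed in by the
	 * caller.
	 */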
	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
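
	/*
	 * Editor's note: clean user pages stay write-protected so the
	 * next write takes another miss and the pte can be marked dirty
	 * by the write path above.
	 */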
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));
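
	/*
	 * Editor's note: compose the hardware entry, as far as the code
	 * shows: virtual page number, ASID and valid bit go into MMUTR;
	 * physical page number plus attribute/size bits go into MMUDR;
	 * the MMUOR write then commits the entry (MMUOR_ITLB selecting
	 * the instruction TLB).
	 */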
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}

void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page numbers */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP. If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;

	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}