init.c

/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/blkdev.h>       /* for initrd_* */
#include <linux/pagemap.h>
#include <linux/memblock.h>

#include <asm/segment.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
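
/* Set to 1 at the end of mem_init(), once all low memory has been handed
 * over to the page allocator.
 */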
int mem_init_done;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

static void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES];

        /* Clear the zone sizes */
        memset(zones_size, 0, sizeof(zones_size));

        /*
         * We use only ZONE_NORMAL
         */
        zones_size[ZONE_NORMAL] = max_low_pfn;

        free_area_init(zones_size);
}

extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
        unsigned long v, p, e;
        pgprot_t prot;
        pgd_t *pge;
        pud_t *pue;
        pmd_t *pme;
        pte_t *pte;
        /* These mark extents of read-only kernel pages...
         * ...from vmlinux.lds.S
         */
        struct memblock_region *region;

        v = PAGE_OFFSET;

        for_each_memblock(memory, region) {
                p = (u32) region->base & PAGE_MASK;
                e = p + (u32) region->size;

                v = (u32) __va(p);
                pge = pgd_offset_k(v);

                while (p < e) {
                        int j;

                        pue = pud_offset(pge, v);
                        pme = pmd_offset(pue, v);

                        if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
                                panic("%s: OR1K kernel hardcoded for "
                                      "two-level page tables",
                                      __func__);
                        }

                        /* Alloc one page for holding PTE's... */
                        pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
                        set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

                        /* Fill the newly allocated page with PTE'S */
                        for (j = 0; p < e && j < PTRS_PER_PTE;
                             v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
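                                /* Pages inside [_s_kernel_ro, _e_kernel_ro)
                                 * cover the kernel's read-only text/rodata and
                                 * are mapped read-only; everything else gets
                                 * normal RW kernel permissions.
                                 */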
                                if (v >= (u32) _e_kernel_ro ||
                                    v < (u32) _s_kernel_ro)
                                        prot = PAGE_KERNEL;
                                else
                                        prot = PAGE_KERNEL_RO;

                                set_pte(pte, mk_pte_phys(p, prot));
                        }

                        pge++;
                }

                printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
                       region->base, region->base + region->size);
        }
}

void __init paging_init(void)
{
        extern void tlb_init(void);

        unsigned long end;
        int i;

        printk(KERN_INFO "Setting up paging and PTEs.\n");

        /* clear out the init_mm.pgd that will contain the kernel's mappings */
        for (i = 0; i < PTRS_PER_PGD; i++)
                swapper_pg_dir[i] = __pgd(0);

        /* make sure the current pgd table points to something sane
         * (even if it is most probably not used until the next
         * switch_mm)
         */
        current_pgd[smp_processor_id()] = init_mm.pgd;

        end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);

        map_ram();

        zone_sizes_init();

        /* self modifying code ;) */
        /* Since the old TLB miss handler has been running up until now,
         * the kernel pages are still all RW, so we can still modify the
         * text directly... after this change and a TLB flush, the kernel
         * pages will become RO.
         */
        {
                extern unsigned long dtlb_miss_handler;
                extern unsigned long itlb_miss_handler;
                unsigned long *dtlb_vector = __va(0x900);
                unsigned long *itlb_vector = __va(0xa00);
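
                /* The word at each exception vector (0x900: DTLB miss,
                 * 0xa00: ITLB miss) is overwritten with the distance to its
                 * handler, expressed in 32-bit instruction words (hence the
                 * shift by 2).
                 */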
                printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
                *itlb_vector = ((unsigned long)&itlb_miss_handler -
                                (unsigned long)itlb_vector) >> 2;

                /* Soft ordering constraint to ensure that dtlb_vector is
                 * the last thing updated
                 */
                barrier();

                printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
                *dtlb_vector = ((unsigned long)&dtlb_miss_handler -
                                (unsigned long)dtlb_vector) >> 2;
        }

        /* Soft ordering constraint to ensure that cache invalidation and
         * TLB flush really happen _after_ code has been modified.
         */
        barrier();

        /* Invalidate instruction caches after code modification */
        mtspr(SPR_ICBIR, 0x900);
        mtspr(SPR_ICBIR, 0xa00);

        /* New TLB miss handlers and kernel page tables are now in place.
         * Make sure that page flags get updated for all pages in TLB by
         * flushing the TLB and forcing all TLB entries to be recreated
         * from their page table flags.
         */
        flush_tlb_all();
}

/* References to section boundaries */

void __init mem_init(void)
{
        BUG_ON(!mem_map);

        max_mapnr = max_low_pfn;
        high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset((void *)empty_zero_page, 0, PAGE_SIZE);

        /* this will put all low memory onto the freelists */
        free_all_bootmem();

        mem_init_print_info(NULL);

        printk("mem_init_done ...........................................\n");

        mem_init_done = 1;
        return;
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
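
/* Release the memory used by __init code and data; the -1 poison value
 * means the freed pages are not poisoned before being returned to the
 * page allocator.
 */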
void free_initmem(void)
{
        free_initmem_default(-1);
}