mem.c 5.5 KB

  1. /*
  2. * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3. * Licensed under the GPL
  4. */
  5. #include <linux/stddef.h>
  6. #include <linux/module.h>
  7. #include <linux/bootmem.h>
  8. #include <linux/highmem.h>
  9. #include <linux/mm.h>
  10. #include <linux/swap.h>
  11. #include <linux/slab.h>
  12. #include <asm/fixmap.h>
  13. #include <asm/page.h>
  14. #include <as-layout.h>
  15. #include <init.h>
  16. #include <kern.h>
  17. #include <kern_util.h>
  18. #include <mem_user.h>
  19. #include <os.h>
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/* allocated in paging_init and unchanged thereafter */
static unsigned long *empty_bad_page = NULL;

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;

/*
 * Set to 1 at the end of mem_init() once the page allocator is up.
 * Not read in this file — presumably consulted by allocation wrappers
 * elsewhere to decide whether kmalloc() may be called yet.
 */
int kmalloc_ok = 0;

/* Used during early boot; end of the host brk, recorded in mem_init(). */
static unsigned long brk_end;
/*
 * Late memory initialization: zero the shared zero-page, absorb the
 * area between the host brk and uml_reserved, hand all bootmem pages
 * to the page allocator, and mark kmalloc as usable.  The statement
 * order here is load-bearing: the brk gap must be mapped before its
 * bootmem is freed, and kmalloc_ok is set only after
 * free_all_bootmem() has populated the freelists.
 */
void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	/* The gap is now mapped; release it from bootmem's reserved set. */
	free_bootmem(__pa(brk_end), uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	max_low_pfn = totalram_pages;
	max_pfn = totalram_pages;
	mem_init_print_info(NULL);
	kmalloc_ok = 1;
}
  53. /*
  54. * Create a page table and place a pointer to it in a middle page
  55. * directory entry.
  56. */
  57. static void __init one_page_table_init(pmd_t *pmd)
  58. {
  59. if (pmd_none(*pmd)) {
  60. pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
  61. set_pmd(pmd, __pmd(_KERNPG_TABLE +
  62. (unsigned long) __pa(pte)));
  63. if (pte != pte_offset_kernel(pmd, 0))
  64. BUG();
  65. }
  66. }
  67. static void __init one_md_table_init(pud_t *pud)
  68. {
  69. #ifdef CONFIG_3_LEVEL_PGTABLES
  70. pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
  71. set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
  72. if (pmd_table != pmd_offset(pud, 0))
  73. BUG();
  74. #endif
  75. }
  76. static void __init fixrange_init(unsigned long start, unsigned long end,
  77. pgd_t *pgd_base)
  78. {
  79. pgd_t *pgd;
  80. pud_t *pud;
  81. pmd_t *pmd;
  82. int i, j;
  83. unsigned long vaddr;
  84. vaddr = start;
  85. i = pgd_index(vaddr);
  86. j = pmd_index(vaddr);
  87. pgd = pgd_base + i;
  88. for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
  89. pud = pud_offset(pgd, vaddr);
  90. if (pud_none(*pud))
  91. one_md_table_init(pud);
  92. pmd = pmd_offset(pud, vaddr);
  93. for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
  94. one_page_table_init(pmd);
  95. vaddr += PMD_SIZE;
  96. }
  97. j = 0;
  98. }
  99. }
/*
 * Make the FIXADDR_USER range (the host vsyscall area, going by the
 * config name) visible to the guest: build the page-table structure,
 * copy the host pages into memory we own, and map that copy read-only.
 * NOTE(review): the copy appears needed because the host's original
 * pages are not part of UML physical memory — confirm against as-layout.
 */
static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	/* Ensure pgd/pud/pmd/pte structure exists for the whole range. */
	fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	/* Snapshot the host pages into bootmem-backed storage. */
	v = (unsigned long) alloc_bootmem_low_pages(size);
	memcpy((void *) v , (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	/* Point each pte at the copy, one read-only page at a time. */
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
		pgd = swapper_pg_dir + pgd_index(vaddr);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		pte = pte_offset_kernel(pmd, vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}
  126. void __init paging_init(void)
  127. {
  128. unsigned long zones_size[MAX_NR_ZONES], vaddr;
  129. int i;
  130. empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
  131. empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
  132. for (i = 0; i < ARRAY_SIZE(zones_size); i++)
  133. zones_size[i] = 0;
  134. zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
  135. (uml_physmem >> PAGE_SHIFT);
  136. free_area_init(zones_size);
  137. /*
  138. * Fixed mappings, only the page table structure has to be
  139. * created - mappings will be set by set_fixmap():
  140. */
  141. vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
  142. fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);
  143. fixaddr_user_init();
  144. }
/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */
void free_initmem(void)
{
}
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the initrd's pages [start, end) to the page allocator once the
 * initrd is no longer needed.  Poison value -1 means the pages are not
 * poisoned before being freed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
  158. /* Allocate and free page tables. */
  159. pgd_t *pgd_alloc(struct mm_struct *mm)
  160. {
  161. pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
  162. if (pgd) {
  163. memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
  164. memcpy(pgd + USER_PTRS_PER_PGD,
  165. swapper_pg_dir + USER_PTRS_PER_PGD,
  166. (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
  167. }
  168. return pgd;
  169. }
/* Release a pgd previously obtained from pgd_alloc(). */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}
  174. pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
  175. {
  176. pte_t *pte;
  177. pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
  178. return pte;
  179. }
  180. pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
  181. {
  182. struct page *pte;
  183. pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
  184. if (!pte)
  185. return NULL;
  186. if (!pgtable_page_ctor(pte)) {
  187. __free_page(pte);
  188. return NULL;
  189. }
  190. return pte;
  191. }
  192. #ifdef CONFIG_3_LEVEL_PGTABLES
  193. pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
  194. {
  195. pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
  196. if (pmd)
  197. memset(pmd, 0, PAGE_SIZE);
  198. return pmd;
  199. }
  200. #endif
/*
 * Thin pass-through to kmalloc(), presumably exported so the
 * host-side (os-*) code, which cannot include kernel headers, can
 * allocate kernel memory.  size and flags go straight to kmalloc().
 */
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}