/* pgalloc.c: page directory & page table allocation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/quicklist.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
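
/* Allocate a single page to hold kernel PTEs, zeroing it before use;
 * returns NULL if no page could be obtained. */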
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

	if (pte)
		clear_page(pte);
	return pte;
}
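
/* Allocate a page to hold userspace PTEs.  With CONFIG_HIGHPTE the
 * page may come from highmem; it is cleared, registered with the core
 * MM via pgtable_page_ctor() (freed again if that fails), and flushed
 * from the data cache before use. */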
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page;

#ifdef CONFIG_HIGHPTE
	page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	if (!page)
		return NULL;

	clear_highpage(page);
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	flush_dcache_page(page);
	return page;
}
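
/* Set a PMD entry.  A zero value clears the whole entry; otherwise the
 * page table pointer is replicated across every word of the entry,
 * stepping on by the size of one page table each time (the BUG_ON()
 * rejects pointers with reserved bits set).  The entry is then written
 * back out of the data cache so the MMU sees a coherent copy. */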
void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
{
	unsigned long *__ste_p = pmdptr->ste;
	int loop;

	if (!pmd) {
		memset(__ste_p, 0, PME_SIZE);
	} else {
		BUG_ON(pmd & (0x3f00 | xAMPRx_SS | 0xe));

		for (loop = PME_SIZE; loop > 0; loop -= 4) {
			*__ste_p++ = pmd;
			pmd += __frv_PT_SIZE;
		}
	}

	frv_dcache_writeback((unsigned long) pmdptr, (unsigned long) (pmdptr + 1));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- nyc
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
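
/* Link a pgd's backing page onto pgd_list.  page->index carries the
 * next pointer and page_private() holds the address of the previous
 * element's next field, forming an intrusive doubly-linked list with
 * no extra storage.  Callers must hold pgd_lock. */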
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	page->index = (unsigned long) pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long) &page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long) &pgd_list);
}
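
/* Unlink a pgd's backing page from pgd_list via the stored
 * back-pointer.  Callers must hold pgd_lock. */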
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);

	next = (struct page *) page->index;
	pprev = (struct page **) page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long) pprev);
}
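
/* Construct a fresh pgd: the kernel portion is copied from
 * swapper_pg_dir and the user portion is zeroed; in the
 * PTRS_PER_PMD == 1 case the pgd is also threaded onto pgd_list
 * under pgd_lock (no lock is taken otherwise, so the early return
 * is safe). */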
void pgd_ctor(void *pgd)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	memcpy((pgd_t *) pgd + USER_PGDS_IN_LAST_PML4,
	       swapper_pg_dir + USER_PGDS_IN_LAST_PML4,
	       (PTRS_PER_PGD - USER_PGDS_IN_LAST_PML4) * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PGDS_IN_LAST_PML4 * sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
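
/* pgds are cached on a per-CPU quicklist: a freed pgd can be handed
 * straight back out with its kernel mappings still in place, and the
 * destructor only runs when the list is eventually trimmed. */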
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
	quicklist_free(0, pgd_dtor, pgd);
}
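
/* Nothing to set up here: pgd caching is handled entirely by the
 * quicklist above. */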
void __init pgtable_cache_init(void)
{
}
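
/* Trim the pgd quicklist back towards 25 cached pages, releasing at
 * most 16 per call and running pgd_dtor on each page actually freed. */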
void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}