/* arch/riscv/include/asm/pgalloc.h */
  1. /*
  2. * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
  3. * Copyright (C) 2012 Regents of the University of California
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License
  7. * as published by the Free Software Foundation, version 2.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #ifndef _ASM_RISCV_PGALLOC_H
  15. #define _ASM_RISCV_PGALLOC_H
  16. #include <linux/mm.h>
  17. #include <asm/tlb.h>
  18. static inline void pmd_populate_kernel(struct mm_struct *mm,
  19. pmd_t *pmd, pte_t *pte)
  20. {
  21. unsigned long pfn = virt_to_pfn(pte);
  22. set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
  23. }
  24. static inline void pmd_populate(struct mm_struct *mm,
  25. pmd_t *pmd, pgtable_t pte)
  26. {
  27. unsigned long pfn = virt_to_pfn(page_address(pte));
  28. set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
  29. }
  30. #ifndef __PAGETABLE_PMD_FOLDED
  31. static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
  32. {
  33. unsigned long pfn = virt_to_pfn(pmd);
  34. set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
  35. }
  36. #endif /* __PAGETABLE_PMD_FOLDED */
/* struct page of the PTE page a populated PMD entry points to. */
#define pmd_pgtable(pmd) pmd_page(pmd)
  38. static inline pgd_t *pgd_alloc(struct mm_struct *mm)
  39. {
  40. pgd_t *pgd;
  41. pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
  42. if (likely(pgd != NULL)) {
  43. memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
  44. /* Copy kernel mappings */
  45. memcpy(pgd + USER_PTRS_PER_PGD,
  46. init_mm.pgd + USER_PTRS_PER_PGD,
  47. (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
  48. }
  49. return pgd;
  50. }
  51. static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
  52. {
  53. free_page((unsigned long)pgd);
  54. }
  55. #ifndef __PAGETABLE_PMD_FOLDED
  56. static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
  57. {
  58. return (pmd_t *)__get_free_page(
  59. GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
  60. }
  61. static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
  62. {
  63. free_page((unsigned long)pmd);
  64. }
/* Free a PMD page as part of an mmu_gather (TLB shootdown) teardown. */
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
#endif /* __PAGETABLE_PMD_FOLDED */
  67. static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
  68. unsigned long address)
  69. {
  70. return (pte_t *)__get_free_page(
  71. GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
  72. }
  73. static inline struct page *pte_alloc_one(struct mm_struct *mm,
  74. unsigned long address)
  75. {
  76. struct page *pte;
  77. pte = alloc_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
  78. if (likely(pte != NULL))
  79. pgtable_page_ctor(pte);
  80. return pte;
  81. }
  82. static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
  83. {
  84. free_page((unsigned long)pte);
  85. }
/*
 * Release a user PTE page from pte_alloc_one().  The destructor must run
 * before the page is freed: it tears down the split page-table lock and
 * accounting set up by pgtable_page_ctor().
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
/*
 * Free a user PTE page during an mmu_gather teardown: run the page-table
 * destructor, then defer the actual free to the TLB batching machinery.
 */
#define __pte_free_tlb(tlb, pte, buf) \
do { \
	pgtable_page_dtor(pte); \
	tlb_remove_page((tlb), pte); \
} while (0)
  96. static inline void check_pgt_cache(void)
  97. {
  98. }
  99. #endif /* _ASM_RISCV_PGALLOC_H */