hugetlbpage-hash64.c

/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>

extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
                                  unsigned long pa, unsigned long rflags,
                                  unsigned long vflags, int psize, int ssize);
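
/*
 * Hash-fault handler for hugetlb pages: build or update the hardware HPTE
 * that maps a huge page PTE.  Return convention (assumed to match the other
 * __hash_page_* helpers): 0 when the fault has been handled or should simply
 * be retried, 1 when a real page fault must be taken, and -1 on a
 * hypervisor-level insertion failure.
 */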
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, unsigned long flags,
                     int ssize, unsigned int shift, unsigned int mmu_psize)
{
        unsigned long vpn;
        unsigned long old_pte, new_pte;
        unsigned long rflags, pa, sz;
        long slot;

        BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

        /* Search the Linux page table for a match with va */
        vpn = hpt_vpn(ea, vsid, ssize);

        /* At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
         *      the most common case)
         * 2. There is a valid (present) pte with an associated HPTE. The
         *      current values of the pp bits in the HPTE prevent access
         *      because we are doing software DIRTY bit management and the
         *      page is currently not DIRTY.
         */

        do {
                old_pte = pte_val(*ptep);
                /* If PTE busy, retry the access */
                if (unlikely(old_pte & _PAGE_BUSY))
                        return 0;
                /* If PTE permissions don't match, take page fault */
                if (unlikely(access & ~old_pte))
                        return 1;
                /* Try to lock the PTE, add ACCESSED and DIRTY if it was
                 * a write access */
                new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
                if (access & _PAGE_RW)
                        new_pte |= _PAGE_DIRTY;
        } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                          old_pte, new_pte));
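
        /* The low pp bits of rflags select the access the hardware grants:
         * 0x2 is user read/write, 0x3 user read-only, so a PTE without
         * _PAGE_RW picks up the extra 0x1 bit (this mirrors the pp encoding
         * used by the other hash fault paths). */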
        rflags = 0x2 | (!(new_pte & _PAGE_RW));
        /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
        rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
        sz = ((1UL) << shift);
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                /* No CPU has hugepages but lacks no execute, so we
                 * don't need to worry about that case */
                rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

        /* Check if pte already has an hpte (case 2) */
        if (unlikely(old_pte & _PAGE_HASHPTE)) {
                /* There MIGHT be an HPTE for this pte */
                unsigned long hash, slot;

                hash = hpt_hash(vpn, shift, ssize);
                if (old_pte & _PAGE_F_SECOND)
                        hash = ~hash;
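
                /* The PTE records where its HPTE lives: _PAGE_F_SECOND means
                 * the secondary hash bucket was used, and _PAGE_F_GIX (bits
                 * 12-14) holds the slot index within that bucket. */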
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += (old_pte & _PAGE_F_GIX) >> 12;

                if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
                                         mmu_psize, ssize, flags) == -1)
                        old_pte &= ~_PAGE_HPTEFLAGS;
        }
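
        /* Case 1, or the update above found no matching HPTE: insert a
         * fresh entry into the hash table. */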
        if (likely(!(old_pte & _PAGE_HASHPTE))) {
                unsigned long hash = hpt_hash(vpn, shift, ssize);

                pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

                /* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
                /* Add in WIMG bits */
                rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
                                      _PAGE_COHERENT | _PAGE_GUARDED));
                /*
                 * always enable memory coherence
                 */
                rflags |= HPTE_R_M;

                slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
                                             mmu_psize, ssize);

                /*
                 * Hypervisor failure. Restore old pte and return -1
                 * similar to __hash_page_*
                 */
                if (unlikely(slot == -2)) {
                        *ptep = __pte(old_pte);
                        hash_failure_debug(ea, access, vsid, trap, ssize,
                                           mmu_psize, mmu_psize, old_pte);
                        return -1;
                }
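
                /* hpte_insert_repeating returns the slot index within the
                 * hash bucket in bits 0-2 and the secondary-hash flag in
                 * bit 3; shifted left by 12 these line up with _PAGE_F_GIX
                 * and _PAGE_F_SECOND, so the new PTE remembers where the
                 * HPTE was placed. */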
                new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
        }

        /*
         * No need to use ldarx/stdcx here: we still hold the _PAGE_BUSY bit
         * taken in the cmpxchg loop above, so nobody else can update this
         * PTE concurrently; the plain store below also releases that lock.
         */
        *ptep = __pte(new_pte & ~_PAGE_BUSY);

        return 0;
}