highmem.c

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

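/*
 * Illustrative usage sketch (not part of the original file; the helper
 * name is hypothetical): a sleepable caller reads the first byte of a
 * possibly-highmem page. kmap() may sleep, so this must not be called
 * from atomic context.
 */
static __maybe_unused u8 example_peek_first_byte(struct page *page)
{
	u8 *vaddr = kmap(page);		/* lowmem pages short-circuit */
	u8 val = *vaddr;

	kunmap(page);
	return val;
}
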
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type __maybe_unused;

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		preempt_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

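/*
 * Illustrative usage sketch (hypothetical helper, not part of the
 * original file): zero a highmem page from atomic context. Nothing may
 * sleep between kmap_atomic() and kunmap_atomic(); this mirrors what
 * clear_highpage() in <linux/highmem.h> does.
 */
static __maybe_unused void example_zero_page_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page);

	clear_page(vaddr);	/* keep the critical section short */
	kunmap_atomic(vaddr);
}
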
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}

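/*
 * Illustrative usage sketch (hypothetical helper): read the first
 * 32-bit word of a physical frame that may have no struct page backing
 * it; the pfn is supplied by the caller.
 */
static __maybe_unused u32 example_read_pfn_word(unsigned long pfn)
{
	u32 *vaddr = kmap_atomic_pfn(pfn);
	u32 val = vaddr[0];

	kunmap_atomic(vaddr);
	return val;
}
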
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

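/*
 * Illustrative round-trip sketch (hypothetical helper): an address
 * handed out by kmap_atomic() resolves back to its struct page through
 * kmap_atomic_to_page(); lowmem addresses fall through to
 * virt_to_page().
 */
static __maybe_unused struct page *example_kmap_roundtrip(struct page *page)
{
	void *vaddr = kmap_atomic(page);
	struct page *check = kmap_atomic_to_page(vaddr);	/* == page */

	kunmap_atomic(vaddr);
	return check;
}
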
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}