highmem.c

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        addr = kmap_high(page);
        flush_tlb_one((unsigned long)addr);

        return addr;
}
EXPORT_SYMBOL(kmap);
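
/*
 * Illustrative usage sketch (not part of the original file; the helper
 * name is hypothetical): copy a possibly-highmem page into a kernel
 * buffer. kmap() may sleep, so this pattern is valid in process context
 * only, and kunmap() takes the struct page, not the virtual address.
 *
 *      static void copy_from_page(struct page *page, void *buf)
 *      {
 *              void *vaddr = kmap(page);
 *
 *              memcpy(buf, vaddr, PAGE_SIZE);
 *              kunmap(page);
 *      }
 */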

void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page)
{
        unsigned long vaddr;
        int idx, type;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
        set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
        local_flush_tlb_one((unsigned long)vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
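
/*
 * Worked example of the slot arithmetic above (the value of KM_TYPE_NR
 * is an illustrative assumption; the real constant comes from
 * <asm/kmap_types.h>): assuming KM_TYPE_NR == 8, the second nested
 * atomic kmap (type == 1) taken on CPU 2 gets idx = 1 + 8 * 2 = 17,
 * i.e. virtual address __fix_to_virt(FIX_KMAP_BEGIN + 17) backed by
 * pte kmap_pte - 17. Each CPU therefore owns a private window of
 * KM_TYPE_NR fixmap slots, which is why no global lock is needed.
 */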

void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int type;

        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                return;
        }

        type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
        {
                int idx = type + KM_TYPE_NR * smp_processor_id();

                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

                /*
                 * force other mappings to Oops if they'll try to access
                 * this pte without first remapping it
                 */
                pte_clear(&init_mm, vaddr, kmap_pte-idx);
                local_flush_tlb_one(vaddr);
        }
#endif
        kmap_atomic_idx_pop();
        pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
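
/*
 * Illustrative usage sketch (not part of the original file; the helper
 * name is hypothetical): zero a highmem page from a context that must
 * not sleep. kunmap_atomic() is the generic wrapper that ends up in
 * __kunmap_atomic() above; nested atomic mappings must be released in
 * LIFO order because of the idx push/pop stack.
 *
 *      static void zero_page_atomic(struct page *page)
 *      {
 *              void *vaddr = kmap_atomic(page);
 *
 *              memset(vaddr, 0, PAGE_SIZE);
 *              kunmap_atomic(vaddr);
 *      }
 */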

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
        unsigned long vaddr;
        int idx, type;

        pagefault_disable();

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
        flush_tlb_one(vaddr);

        return (void *)vaddr;
}
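
/*
 * Illustrative usage sketch (not part of the original file; phys_addr
 * and buf are hypothetical): mapping a raw physical frame that has no
 * struct page, e.g. one described only by firmware tables. The same
 * atomic rules apply, and the mapping is released via kunmap_atomic()
 * on the returned address.
 *
 *      void *vaddr = kmap_atomic_pfn(phys_addr >> PAGE_SHIFT);
 *
 *      memcpy(buf, vaddr, PAGE_SIZE);  (no sleeping while mapped)
 *      kunmap_atomic(vaddr);
 */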

struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}