highmem.c

#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
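
/*
 * Usage sketch (illustrative only, not part of the original file): a typical
 * kmap()/kunmap() pair from sleepable process context.  The helper name
 * zero_highmem_page_example() is hypothetical.  kmap() may sleep waiting for
 * a free slot in the kmap pool, so this pattern must not be used in atomic
 * context.
 *
 *	static void zero_highmem_page_example(struct page *page)
 *	{
 *		void *vaddr = kmap(page);
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap(page);
 *	}
 */
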
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
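
/*
 * Usage sketch (illustrative only, not part of the original file): the atomic
 * variant for short, tight code paths.  Between kmap_atomic() and
 * kunmap_atomic() pagefaults and preemption are disabled, so the code in
 * between must not sleep, and mappings should be released in reverse order of
 * creation.  copy_highmem_page_example() is a hypothetical helper.
 *
 *	static void copy_highmem_page_example(struct page *dst, struct page *src)
 *	{
 *		void *vdst = kmap_atomic(dst);
 *		void *vsrc = kmap_atomic(src);
 *
 *		memcpy(vdst, vsrc, PAGE_SIZE);
 *		kunmap_atomic(vsrc);
 *		kunmap_atomic(vdst);
 *	}
 */
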
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

	return (void *)vaddr;
}
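
/*
 * Usage sketch (illustrative only, not part of the original file): because
 * kmap_atomic_pfn() takes a raw page frame number, it can map memory that has
 * no struct page, e.g. a frame named only by a physical address.  The helper
 * read_phys_word_example() and its argument are hypothetical.
 *
 *	static u32 read_phys_word_example(phys_addr_t phys)
 *	{
 *		void *vaddr = kmap_atomic_pfn(phys >> PAGE_SHIFT);
 *		u32 val = *(u32 *)(vaddr + offset_in_page(phys));
 *
 *		kunmap_atomic(vaddr);
 *		return val;
 *	}
 */
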
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	int idx;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}