/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * Provides kernel-static versions of atomic kmap functions originally
 * found as inlines in include/asm-sparc/highmem.h.  These became
 * needed as kmap_atomic() and kunmap_atomic() started getting
 * called from within modules.
 * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 * But kmap_atomic() and kunmap_atomic() cannot be inlined in
 * modules because they are loaded with btfixup-ped functions.
 */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text.  Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and not try to sleep
 * with a kmap taken, much like a spinlock.  Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
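
/*
 * Illustrative sketch (not part of this file) of the usage discipline
 * described above.  copy_into_page() is a hypothetical helper; the
 * point is the map/unmap pairing and that nothing between the two
 * calls may sleep:
 *
 *	static void copy_into_page(struct page *page,
 *				   const void *src, size_t len)
 *	{
 *		void *dst = kmap_atomic(page);
 *
 *		memcpy(dst, src, len);
 *		kunmap_atomic(dst);
 *	}
 */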

#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/vaddrs.h>

pgprot_t kmap_prot;

static pte_t *kmap_pte;

void __init kmap_init(void)
{
	unsigned long address;
	pmd_t *dir;

	address = __fix_to_virt(FIX_KMAP_BEGIN);
	dir = pmd_offset(pgd_offset_k(address), address);

	/* cache the first kmap pte */
	kmap_pte = pte_offset_kernel(dir, address);

	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
}
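
/*
 * Caching a single pte pointer suffices on the assumption that all
 * KM_TYPE_NR * NR_CPUS kmap slots are contiguous in the fixmap area
 * and mapped by the one page table looked up above, so any slot's pte
 * is reachable by pointer arithmetic from kmap_pte.
 */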

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
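
	/*
	 * Fixmap virtual addresses descend as the index grows, while
	 * their ptes sit at ascending addresses in the page table, so
	 * the pte for slot idx is kmap_pte - idx below.
	 */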

	/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));

	/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
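
/*
 * A sketch of the stack discipline implied by kmap_atomic_idx_push()
 * and kmap_atomic_idx_pop(): nested atomic kmaps must be released in
 * reverse order of acquisition.  (Hypothetical pages page_a/page_b,
 * for illustration only.)
 *
 *	va = kmap_atomic(page_a);
 *	vb = kmap_atomic(page_b);
 *	...
 *	kunmap_atomic(vb);
 *	kunmap_atomic(va);
 */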

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		preempt_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);
#else
		flush_cache_all();
#endif

		/*
		 * force other mappings to Oops if they try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);

		/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);
#else
		flush_tlb_all();
#endif
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);