tlb-smp.c

/* SMP TLB support routines.
 *
 * Copyright (C) 2006-2008 Panasonic Corporation
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>

/*
 * For TLB flushing
 */
#define FLUSH_ALL	0xffffffff
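
/* The cross-CPU flush request: the initiating CPU fills in flush_mm and
 * flush_va, sets the target CPUs in flush_cpumask and sends an IPI; each
 * receiving CPU does the flush locally and clears its own bit.  Access is
 * serialised by tlbstate_lock.
 */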
static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
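
/* Per-CPU TLB state: every CPU initially runs on init_mm */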
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
        &init_mm, 0
};

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             unsigned long va);
static void do_flush_tlb_all(void *info);

/**
 * smp_flush_tlb - Callback to invalidate the TLB.
 * @unused: Callback context (ignored).
 */
void smp_flush_tlb(void *unused)
{
        unsigned long cpu_id;

        cpu_id = get_cpu();

        if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
                /* This was a BUG() but until someone can quote me the line
                 * from the intel manual that guarantees an IPI to multiple
                 * CPUs is retried _only_ on the erroring CPUs it's staying
                 * as a return
                 *
                 * BUG();
                 */
                goto out;

        if (flush_va == FLUSH_ALL)
                local_flush_tlb();
        else
                local_flush_tlb_page(flush_mm, flush_va);

        /* The barriers make sure the flush above completes before the
         * initiator, spinning in flush_tlb_others(), sees our bit clear.
         */
        smp_mb__before_clear_bit();
        cpumask_clear_cpu(cpu_id, &flush_cpumask);
        smp_mb__after_clear_bit();
out:
        put_cpu();
}

/**
 * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
 * @cpumask: The list of CPUs to target.
 * @mm: The VM context to flush from (if va!=FLUSH_ALL).
 * @va: Virtual address to flush or FLUSH_ALL to flush everything.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             unsigned long va)
{
        cpumask_t tmp;

        /* A couple of sanity checks (to be removed):
         * - mask must not be empty
         * - current CPU must not be in mask
         * - we do not send IPIs to as-yet unbooted CPUs.
         */
        BUG_ON(!mm);
        BUG_ON(cpumask_empty(&cpumask));
        BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));

        cpumask_and(&tmp, &cpumask, cpu_online_mask);
        BUG_ON(!cpumask_equal(&cpumask, &tmp));

        /* I'm not happy about this global shared spinlock in the MM hot path,
         * but we'll see how contended it is.
         *
         * Temporarily this turns IRQs off, so that lockups are detected by the
         * NMI watchdog.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
        /* Atomically OR the target CPUs into the global flush mask */
        atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
#else
#error Not supported.
#endif

        /* FIXME: if NR_CPUS>=3, change send_IPI_mask */
        smp_call_function(smp_flush_tlb, NULL, 1);

        /* Wait for the targeted CPUs to acknowledge by clearing their bits */
        while (!cpumask_empty(&flush_cpumask))
                /* Lockup detection does not belong here */
                smp_mb();

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
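
/* Note for callers (enforced by the BUG_ON()s above): flush the local TLB
 * yourself and strip the current CPU from the mask before calling, as the
 * flush_tlb_*() entry points below do.
 */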

/**
 * flush_tlb_mm - Invalidate TLB of specified VM context
 * @mm: The VM context to invalidate.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t cpu_mask;

        preempt_disable();
        cpumask_copy(&cpu_mask, mm_cpumask(mm));
        cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

        local_flush_tlb();
        if (!cpumask_empty(&cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}

/**
 * flush_tlb_current_task - Invalidate TLB of current task
 */
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpumask_copy(&cpu_mask, mm_cpumask(mm));
        cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

        local_flush_tlb();
        if (!cpumask_empty(&cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}

/**
 * flush_tlb_page - Invalidate TLB of page
 * @vma: The VM context to invalidate the page for.
 * @va: The virtual address of the page to invalidate.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpumask_copy(&cpu_mask, mm_cpumask(mm));
        cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

        local_flush_tlb_page(mm, va);
        if (!cpumask_empty(&cpu_mask))
                flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}

/**
 * do_flush_tlb_all - Callback to completely invalidate a TLB
 * @unused: Callback context (ignored).
 */
static void do_flush_tlb_all(void *unused)
{
        local_flush_tlb_all();
}

/**
 * flush_tlb_all - Completely invalidate TLBs on all CPUs
 */
void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}
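
/* For illustration only (assumed generic-MM call path, not something this
 * file defines): these entry points are reached when the kernel changes or
 * tears down page table entries, roughly:
 *
 *	ptep_clear_flush(vma, addr, ptep)
 *		-> flush_tlb_page(vma, addr)
 *			-> local_flush_tlb_page(mm, addr)    [this CPU]
 *			-> flush_tlb_others(mask, mm, addr)  [IPI the rest]
 */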