tlb-smp.c

/* SMP TLB support routines.
 *
 * Copyright (C) 2006-2008 Panasonic Corporation
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>

/*
 * Shared state for IPI-based TLB flushing
 */
#define FLUSH_ALL        0xffffffff

static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
        &init_mm, 0
};

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             unsigned long va);
static void do_flush_tlb_all(void *info);
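
/* Shootdown protocol: the initiating CPU takes tlbstate_lock, publishes
 * flush_mm and flush_va, sets the target CPUs' bits in flush_cpumask and
 * kicks the other CPUs via smp_call_function().  CPUs not in the mask
 * return straight away; each target flushes locally in smp_flush_tlb()
 * and clears its bit, and the initiator spins until the mask is empty.
 */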

/**
 * smp_flush_tlb - Callback to invalidate the TLB.
 * @unused: Callback context (ignored).
 */
void smp_flush_tlb(void *unused)
{
        unsigned long cpu_id;

        cpu_id = get_cpu();

        if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
                /* This was a BUG() but until someone can quote me the line
                 * from the intel manual that guarantees an IPI to multiple
                 * CPUs is retried _only_ on the erroring CPUs it's staying
                 * as a return
                 *
                 * BUG();
                 */
                goto out;

        if (flush_va == FLUSH_ALL)
                local_flush_tlb();
        else
                local_flush_tlb_page(flush_mm, flush_va);
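
        /* The initiator in flush_tlb_others() spins until flush_cpumask is
         * empty; the barriers order the TLB flush above before the clearing
         * of our bit, and make the clear visible to the spinning CPU.
         */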
        smp_mb__before_atomic();
        cpumask_clear_cpu(cpu_id, &flush_cpumask);
        smp_mb__after_atomic();
out:
        put_cpu();
}

/**
 * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
 * @cpumask: The list of CPUs to target.
 * @mm: The VM context to flush from (if va!=FLUSH_ALL).
 * @va: Virtual address to flush or FLUSH_ALL to flush everything.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             unsigned long va)
{
        cpumask_t tmp;

        /* A couple of sanity checks (to be removed):
         * - mask must not be empty
         * - current CPU must not be in mask
         * - we do not send IPIs to as-yet unbooted CPUs.
         */
        BUG_ON(!mm);
        BUG_ON(cpumask_empty(&cpumask));
        BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));

        cpumask_and(&tmp, &cpumask, cpu_online_mask);
        BUG_ON(!cpumask_equal(&cpumask, &tmp));

        /* I'm not happy about this global shared spinlock in the MM hot
         * path, but we'll see how contended it is.
         *
         * Temporarily this turns IRQs off, so that lockups are detected by
         * the NMI watchdog.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
        atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
#else
#error Not supported.
#endif

        /* FIXME: if NR_CPUS>=3, change send_IPI_mask */
        smp_call_function(smp_flush_tlb, NULL, 1);

        while (!cpumask_empty(&flush_cpumask))
                /* Lockup detection does not belong here */
                smp_mb();
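
        /* Every target has acknowledged by clearing its bit, so the shared
         * flush_mm/flush_va they were reading can now be released.
         */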
        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}

/**
 * flush_tlb_mm - Invalidate TLB of specified VM context
 * @mm: The VM context to invalidate.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t cpu_mask;

        preempt_disable();
        cpumask_copy(&cpu_mask, mm_cpumask(mm));
        cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

        local_flush_tlb();
        if (!cpumask_empty(&cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}
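
/* flush_tlb_current_task() and flush_tlb_page() below follow the same
 * pattern: flush the local TLB first, then shoot down every other CPU
 * in mm_cpumask(mm) that may hold stale entries for this context.
 */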

/**
 * flush_tlb_current_task - Invalidate TLB of current task
 */
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpumask_copy(&cpu_mask, mm_cpumask(mm));
        cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

        local_flush_tlb();
        if (!cpumask_empty(&cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}

/**
 * flush_tlb_page - Invalidate TLB of page
 * @vma: The VM context to invalidate the page for.
 * @va: The virtual address of the page to invalidate.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpumask_copy(&cpu_mask, mm_cpumask(mm));
        cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

        local_flush_tlb_page(mm, va);
        if (!cpumask_empty(&cpu_mask))
                flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}
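
/* Example (sketch): generic MM code that has just changed a PTE calls
 * flush_tlb_page(vma, address), e.g. from ptep_clear_flush(), so that no
 * CPU keeps a stale translation for that one page while the rest of the
 * TLB stays intact.
 */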

/**
 * do_flush_tlb_all - Callback to completely invalidate a TLB
 * @unused: Callback context (ignored).
 */
static void do_flush_tlb_all(void *unused)
{
        local_flush_tlb_all();
}

/**
 * flush_tlb_all - Completely invalidate TLBs on all CPUs
 */
void flush_tlb_all(void)
{
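        /* on_each_cpu() invokes the callback on every online CPU, including
         * this one, and with the wait flag set does not return until all of
         * them have finished.
         */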
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}