tlbflush.h
#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

extern void setup_ptcg_sem(int max_purges, int from_palo);
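/*
 * (See arch/ia64/mm/tlb.c: setup_ptcg_sem() limits how many global TLB
 * purges (ptc.g) may be in flight at once; max_purges is the platform
 * limit, reported by PAL or by the PALO table as the second argument
 * indicates.)
 */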
/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
  extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
# define flush_tlb_all()		smp_flush_tlb_all()
#else
# define flush_tlb_all()		local_flush_tlb_all()
# define smp_flush_tlb_cpumask(m)	local_flush_tlb_all()
#endif
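/*
 * Finish an mm flush on the local CPU: if the flushed mm is the one
 * currently loaded, activate_context() picks up a fresh context number
 * (flush_tlb_mm() below clears mm->context) and reloads the region
 * registers, so the stale translations are no longer reachable.
 */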
static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
	if (mm == current->active_mm)
		activate_context(mm);
}
/*
 * Flush a specified user mapping.  This is called, e.g., as a result of
 * fork() and exit().  fork() ends up here because the copy-on-write
 * mechanism needs to write-protect the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	/* Mark the old context stale and force allocation of a new one. */
	set_bit(mm->context, ia64_ctx.flushmap);
	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}
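/*
 * Illustrative sketch only (assumes the generic dup_mmap() flow): after
 * fork() has copied and write-protected the parent's page tables, the
 * parent's stale writable translations are dropped with something like
 *
 *	flush_tlb_mm(oldmm);
 *
 * so that subsequent writes fault and take the copy-on-write path.
 */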
extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular TLB flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
	if (vma->vm_mm == current->active_mm)
		ia64_ptcl(addr, (PAGE_SHIFT << 2));	/* purge the page from the local TLB */
	else
		vma->vm_mm->context = 0;	/* not active here: just invalidate the context */
#endif
}
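/*
 * Minimal usage sketch (generic MM code, names assumed rather than defined
 * here): a single-PTE update is typically paired with a page-granular
 * flush, e.g.
 *
 *	ptep_get_and_clear(vma->vm_mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 */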
/*
 * Flush the local TLB.  Invoked from another cpu using an IPI.
 */
#ifdef CONFIG_SMP
void smp_local_flush_tlb(void);
#else
#define smp_local_flush_tlb()
#endif
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();	/* XXX fix me */
}
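/*
 * Note: flush_tlb_kernel_range() is what the vmalloc/vunmap code calls after
 * kernel page-table entries are torn down; no range-limited kernel purge is
 * wired up here (hence the "XXX fix me"), so the whole TLB is flushed.
 */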
#endif /* _ASM_IA64_TLBFLUSH_H */