mmu_context.h

#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm-generic/mm_hooks.h>
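
/* Nothing to do for lazy TLB switches on PA-RISC; this hook is a no-op. */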
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* on PA-RISC, we actually have enough contexts to justify an allocator
 * for them.  prumpf */

extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);
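
/* Called when a new mm is created: hand it its own space id.  The BUG_ON
 * documents the assumption that no other task references the mm yet, so
 * the unlocked assignment below is safe. */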
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_users) != 1);

	mm->context = alloc_sid();
	return 0;
}

static inline void
destroy_context(struct mm_struct *mm)
{
	free_sid(mm->context);
	mm->context = 0;
}
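
/* Convert a space id into the protection id format expected by cr8;
 * the shift accounts for the two registers using different bit layouts. */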
static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
	return context << 1;
#else
	return context >> (SPACEID_SHIFT - 1);
#endif
}
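
/* Install the new context: load the space id into space register 3
 * (used for user-space accesses) and the matching protection id into
 * control register 8. */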
static inline void load_context(mm_context_t context)
{
	mtsp(context, 3);
	mtctl(__space_to_prot(context), 8);
}
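
/* Point cr25 at the physical address of the new page directory and load
 * the new space/protection ids.  Caller must have interrupts disabled. */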
static inline void switch_mm_irqs_off(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
}

static inline void switch_mm(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
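
/* Advertise to the generic code that this architecture provides its own
 * switch_mm_irqs_off() rather than the default fallback. */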
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk,mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * Activate_mm is our one chance to allocate a space id
	 * for a new mm created in the exec path.  There's also
	 * some lazy tlb stuff, which is currently dead code, but
	 * we only allocate a space id if one hasn't been allocated
	 * already, so we should be OK.
	 */
	BUG_ON(next == &init_mm); /* Should never happen */

	if (next->context == 0)
		next->context = alloc_sid();

	switch_mm(prev, next, current);
}

#endif