/* Copyright (C) 2009 Red Hat, Inc.
 *
 * See ../COPYING for licensing terms.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/mmu_context.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
  10. /*
  11. * use_mm
  12. * Makes the calling kernel thread take on the specified
  13. * mm context.
  14. * (Note: this routine is intended to be called only
  15. * from a kernel thread context)
  16. */
  17. void use_mm(struct mm_struct *mm)
  18. {
  19. struct mm_struct *active_mm;
  20. struct task_struct *tsk = current;
  21. task_lock(tsk);
  22. active_mm = tsk->active_mm;
  23. if (active_mm != mm) {
  24. atomic_inc(&mm->mm_count);
  25. tsk->active_mm = mm;
  26. }
  27. tsk->mm = mm;
  28. switch_mm(active_mm, mm, tsk);
  29. task_unlock(tsk);
  30. #ifdef finish_arch_post_lock_switch
  31. finish_arch_post_lock_switch();
  32. #endif
  33. if (active_mm != mm)
  34. mmdrop(active_mm);
  35. }
  36. EXPORT_SYMBOL_GPL(use_mm);
  37. /*
  38. * unuse_mm
  39. * Reverses the effect of use_mm, i.e. releases the
  40. * specified mm context which was earlier taken on
  41. * by the calling kernel thread
  42. * (Note: this routine is intended to be called only
  43. * from a kernel thread context)
  44. */
  45. void unuse_mm(struct mm_struct *mm)
  46. {
  47. struct task_struct *tsk = current;
  48. task_lock(tsk);
  49. sync_mm_rss(mm);
  50. tsk->mm = NULL;
  51. /* active_mm is still 'mm' */
  52. enter_lazy_tlb(mm, tsk);
  53. task_unlock(tsk);
  54. }
  55. EXPORT_SYMBOL_GPL(unuse_mm);