/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H

#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

/*
 * Details of the page allocation that triggered the oom killer, used to
 * determine what should be killed.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Memory cgroup in which oom is invoked, or NULL for global oom */
	struct mem_cgroup *memcg;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill was requested by sysrq; otherwise
	 * the order is only used for display purposes.
	 */
	const int order;

	/* Used by oom implementation, do not set */
	unsigned long totalpages;
	struct task_struct *chosen;
	unsigned long chosen_points;
};

extern struct mutex oom_lock;

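/*
 * A task that is about to free a large amount of memory on behalf of the
 * system (swapoff is one such caller) can mark itself as the origin of
 * the oom condition; the OOM killer then selects it as the victim
 * instead of killing an innocent task.
 */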
static inline void set_current_oom_origin(void)
{
	current->signal->oom_flag_origin = true;
}

static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flag_origin = false;
}
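
/*
 * A minimal usage sketch (do_reclaim_work() is a hypothetical helper,
 * not part of this header):
 *
 *	set_current_oom_origin();
 *	err = do_reclaim_work();
 *	clear_current_oom_origin();
 */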

static inline bool oom_task_origin(const struct task_struct *p)
{
	return p->signal->oom_flag_origin;
}
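
/*
 * The OOM killer points signal->oom_mm at the victim's mm when it marks
 * a task as a victim, so a non-NULL oom_mm identifies an oom victim.
 */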
static inline bool tsk_is_oom_victim(struct task_struct *tsk)
{
	return tsk->signal->oom_mm;
}

/*
 * Use this helper if tsk->mm != mm and the victim mm needs special
 * handling. This is guaranteed to stay true once set.
 */
static inline bool mm_is_oom_victim(struct mm_struct *mm)
{
	return test_bit(MMF_OOM_VICTIM, &mm->flags);
}

/*
 * Checks whether a page fault on the given mm is still reliable.
 * This is no longer true if the oom reaper started to reap the
 * address space, which is reflected by the MMF_UNSTABLE flag set on
 * the mm. At that point any !shared mapping would lose its content
 * and could cause memory corruption (zero pages instead of the
 * original content).
 *
 * Callers should invoke this before establishing a page table entry for
 * a !shared mapping, and under the proper page table lock.
 *
 * Returns 0 when the page fault is safe, VM_FAULT_SIGBUS otherwise.
 */
static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
	if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
		return VM_FAULT_SIGBUS;
	return 0;
}
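
/*
 * A sketch of the intended call pattern in a fault path (the identifiers
 * below are illustrative, not part of this header): the check must happen
 * after the page table lock is taken and before the PTE is installed, so
 * the oom reaper cannot zap the mapping in between:
 *
 *	spin_lock(ptl);
 *	if (check_stable_address_space(mm)) {
 *		spin_unlock(ptl);
 *		return VM_FAULT_SIGBUS;
 *	}
 *	set_pte_at(mm, address, pte, entry);
 *	spin_unlock(ptl);
 */

/*
 * Unmaps the private, unshared parts of an oom victim's address space
 * after setting MMF_UNSTABLE; used by the oom reaper. Returns false if
 * part of the address space could not be reaped.
 */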
bool __oom_reap_task_mm(struct mm_struct *mm);
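
/*
 * Heuristic "badness" score for @p: roughly the task's rss, swap and
 * page table footprint, adjusted by oom_score_adj. The eligible task
 * with the highest score is killed first; 0 means unkillable.
 */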
extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);
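
/*
 * Main entry point of the OOM killer: selects and kills a victim that
 * satisfies the constraints in @oc. Returns false if no eligible task
 * could be found.
 */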
extern bool out_of_memory(struct oom_control *oc);
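
/*
 * Called when an oom victim is done releasing memory, so that
 * oom_killer_disable() can stop waiting for it.
 */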
extern void exit_oom_victim(void);
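
/*
 * The OOM notifier chain is called before a victim is selected; if a
 * callback reports freed pages, the kill is skipped.
 */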
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
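
/*
 * Used on the suspend/hibernation path: disables the oom killer and
 * waits up to @timeout for existing victims to exit. Returns false if
 * the wait timed out and the oom killer was re-enabled.
 */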
extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
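
/*
 * Returns a thread of the process @p that still has a valid mm, with
 * task_lock() held (the caller must task_unlock()), or NULL.
 */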
extern struct task_struct *find_lock_task_mm(struct task_struct *p);

/* sysctls, exposed under /proc/sys/vm/ */
extern int sysctl_oom_dump_tasks;		/* dump the task list on OOM */
extern int sysctl_oom_kill_allocating_task;	/* kill the allocating task instead */
extern int sysctl_panic_on_oom;			/* panic instead of killing */

#endif /* __INCLUDE_LINUX_OOM_H */