/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H

#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	MEMCG_SWAP,
	MEMCG_SOCK,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
	MEMCG_PROT_NONE,
	MEMCG_PROT_LOW,
	MEMCG_PROT_MIN,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	atomic_t ref;
};
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With THP,
 * it is incremented by the number of pages. This counter is used to trigger
 * some periodic events. This is straightforward and better than using
 * jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};

struct mem_cgroup_stat_cpu {
	long count[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[0];
};
/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];

#ifdef CONFIG_MEMCG_KMEM
	struct memcg_shrinker_map __rcu *shrinker_map;
#endif
	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
	bool congested;			/* memcg has many dirty pages */
					/* backed by a congested BDI */

	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Upper bound of normal memory consumption range */
	unsigned long high;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup,
	 * if it has to kill one of them?
	 */
	bool oom_group;
	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events */
	struct cgroup_file events_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when the task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	/* memory.stat */
	struct mem_cgroup_stat_cpu __percpu *stat_cpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t stat[MEMCG_NR_STAT];
	atomic_long_t events[NR_VM_EVENT_ITEMS];
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct list_head kmem_caches;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t scan_nodes;
	atomic_t numainfo_events;
	atomic_t numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};
/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: a larger batch may be warranted on very large machines.
 */
#define MEMCG_CHARGE_BATCH 32U
extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, struct mem_cgroup **memcgp,
				bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}
/**
 * mem_cgroup_lruvec - get the lru list vector for a node or a memcg node
 * @pgdat: node of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for a given @pgdat and @memcg.
 * This can be the node lruvec, if the memory controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
					       struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = node_lruvec(pgdat);
		goto out;
	}

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}
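
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * caller that resolves the memcg owning an mm and drops the reference
 * when done. get_mem_cgroup_from_mm() returns with a css reference
 * held, so it pairs with mem_cgroup_put():
 *
 *	struct mem_cgroup *memcg;
 *
 *	memcg = get_mem_cgroup_from_mm(current->mm);
 *	... query memcg, e.g. memcg_page_state(memcg, MEMCG_CACHE) ...
 *	mem_cgroup_put(memcg);
 */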
#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
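/*
 * Illustrative sketch (an assumption, not taken from this header):
 * mem_cgroup_iter() is typically used to walk the hierarchy below a root
 * memcg, for example during reclaim:
 *
 *	struct mem_cgroup *memcg;
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		... scan the lruvecs of memcg ...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 *
 * Breaking out of the loop early requires mem_cgroup_iter_break(root, memcg)
 * so the reference held on the current position is dropped.
 */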
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}
/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask);

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_node *mz;
	unsigned long nr_pages = 0;
	int zid;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr_pages += mz->lru_zone_size[zid][lru];
	return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
			       struct task_struct *p);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	long x = atomic_long_read(&memcg->stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->stat[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->count[idx], x);
}
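
/*
 * Illustrative note (not part of the original header): __mod_memcg_state()
 * batches updates in a per-CPU counter and only folds them into the atomic
 * memcg->stat[] counter once the pending delta exceeds MEMCG_CHARGE_BATCH.
 * A hypothetical caller accounting one page of page cache, in an
 * interrupt-safe context, would do:
 *
 *	__mod_memcg_state(memcg, MEMCG_CACHE, 1);
 *
 * which usually touches only the local CPU's cache line. Readers using
 * memcg_page_state() may therefore lag behind by up to roughly
 * MEMCG_CHARGE_BATCH pages per CPU.
 */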
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	struct mem_cgroup_per_node *pn;
	long x;

	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	if (mem_cgroup_disabled())
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

	/* Update memcg */
	__mod_memcg_state(pn->memcg, idx, val);

	/* Update lruvec */
	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &pn->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
	__mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}
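
/*
 * Illustrative sketch (not part of the original header): the lruvec helpers
 * above keep three levels of counters in sync. For example, a hypothetical
 * caller marking a page cache page dirty might do:
 *
 *	mod_lruvec_page_state(page, NR_FILE_DIRTY, 1);
 *
 * which updates the node-level vmstat counter, the owning memcg's counter
 * and the per-lruvec counter in one call. The inc_/dec_ wrappers defined
 * later in this file are thin conveniences around these functions.
 */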
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->events[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->events[idx], x);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}
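
/*
 * Illustrative example (a sketch, not taken from this header): a fault
 * path that wants the event attributed to the mm's memcg could do:
 *
 *	count_memcg_event_mm(vma->vm_mm, PGFAULT);
 *
 * PGFAULT is a vm_event_item; the lookup of the owning memcg and the RCU
 * protection are handled inside the helper.
 */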
static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	atomic_long_inc(&memcg->memory_events[event]);
	cgroup_file_notify(&memcg->events_file);
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif
#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
	struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline int mem_cgroup_try_charge_delay(struct page *page,
					      struct mm_struct *mm,
					      gfp_t gfp_mask,
					      struct mem_cgroup **memcgp,
					      bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
					       struct mem_cgroup *memcg)
{
	return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			     int nid, unsigned int lru_mask)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
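
/*
 * Illustrative sketch (an assumption, not taken from this header):
 * networking code typically gates memcg socket accounting on the static
 * key and the socket's sk_memcg pointer, e.g.:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		... treat the socket as under memory pressure ...
 */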
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg);
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);
/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
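
/*
 * Illustrative sketch (an assumption, not taken from this header):
 * iterating the per-memcg children of a root cache, skipping unpopulated
 * slots. root_cache and cache_from_memcg_idx() stand in for the slab
 * internals that normally accompany this macro:
 *
 *	int idx;
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(idx) {
 *		c = cache_from_memcg_idx(root_cache, idx);
 *		if (!c)
 *			continue;
 *		... operate on the child cache ...
 *	}
 *	mutex_unlock(&slab_mutex);
 */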
static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}

/*
 * Helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id) { }
#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */