/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
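
/*
 * Illustrative usage sketch, with hypothetical names (my_lru, my_shrinker,
 * my_object, my_isolate, dispose_list); list_lru_init_memcg() and
 * list_lru_shrink_walk() are the wrappers declared in <linux/list_lru.h>:
 *
 *	static struct list_lru my_lru;
 *
 *	// init time; my_shrinker is assumed to be set up already
 *	// (e.g. via prealloc_shrinker()) so that it has a valid id
 *	err = list_lru_init_memcg(&my_lru, &my_shrinker);
 *
 *	// object becomes unused / is taken back into use
 *	list_lru_add(&my_lru, &my_object->lru);
 *	list_lru_del(&my_lru, &my_object->lru);
 *
 *	// from my_shrinker's scan callback: hand items to my_isolate()
 *	freed = list_lru_shrink_walk(&my_lru, sc, my_isolate, &dispose_list);
 *
 *	// teardown
 *	list_lru_destroy(&my_lru);
 */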

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &list_lrus);
        mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
        return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return lru->memcg_aware;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        struct list_lru_memcg *memcg_lrus;
        /*
         * Either lock or RCU protects the array of per cgroup lists
         * from relocation (see memcg_update_list_lru_node).
         */
        memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
                                           lockdep_is_held(&nlru->lock));
        if (memcg_lrus && idx >= 0)
                return memcg_lrus->lru[idx];
        return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
        struct page *page;

        if (!memcg_kmem_enabled())
                return NULL;
        page = virt_to_head_page(ptr);
        return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
                   struct mem_cgroup **memcg_ptr)
{
        struct list_lru_one *l = &nlru->lru;
        struct mem_cgroup *memcg = NULL;

        if (!nlru->memcg_lrus)
                goto out;

        memcg = mem_cgroup_from_kmem(ptr);
        if (!memcg)
                goto out;

        l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
        if (memcg_ptr)
                *memcg_ptr = memcg;
        return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
        return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
                   struct mem_cgroup **memcg_ptr)
{
        if (memcg_ptr)
                *memcg_ptr = NULL;
        return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */
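
/*
 * list_lru_add - add an item to the lru list's tail
 *
 * The NUMA node and (for a memcg-aware lru) the owning cgroup are derived
 * from the page backing @item. Returns true if @item was not on a list and
 * has been added; false if it was already linked somewhere.
 */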
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct mem_cgroup *memcg;
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (list_empty(item)) {
                l = list_lru_from_kmem(nlru, item, &memcg);
                list_add_tail(item, &l->list);
                /* Set shrinker bit if the first element was added */
                if (!l->nr_items++)
                        memcg_set_shrinker_bit(memcg, nid,
                                               lru_shrinker_id(lru));
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
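
/*
 * list_lru_del - delete an item from the lru list
 *
 * Counterpart of list_lru_add(). Returns true if @item was on a list and
 * has been removed; false if it was already unlinked.
 */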
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                l = list_lru_from_kmem(nlru, item, NULL);
                list_del_init(item);
                l->nr_items--;
                nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
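
/*
 * Unlink @item from within an isolate callback. The walk code already holds
 * the node's lru lock and adjusts nlru->nr_items itself, so only the
 * per-list counter is updated here.
 */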
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
        list_del_init(item);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);
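
/*
 * Like list_lru_isolate(), but move @item onto the caller-supplied @head
 * (for example a private dispose list) instead of unlinking it outright.
 */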
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head)
{
        list_move(item, head);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
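
/*
 * Number of items on one node/memcg list. Read under RCU only, without the
 * node lock, so the value is merely a snapshot.
 */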
unsigned long list_lru_count_one(struct list_lru *lru,
                                 int nid, struct mem_cgroup *memcg)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        unsigned long count;

        rcu_read_lock();
        l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
        count = l->nr_items;
        rcu_read_unlock();

        return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);
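
/* Total number of items on one NUMA node, across all memcgs. */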
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
        struct list_lru_node *nlru;

        nlru = &lru->node[nid];
        return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
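
/*
 * Core walk loop: hand each item to @isolate and act on the returned
 * lru_status. LRU_REMOVED and LRU_REMOVED_RETRY count as isolated;
 * LRU_ROTATE moves the item to the list tail; LRU_SKIP leaves it alone;
 * LRU_RETRY and LRU_REMOVED_RETRY mean the callback dropped and re-took the
 * lru lock, so the traversal must restart from the list head.
 */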
static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
                    list_lru_walk_cb isolate, void *cb_arg,
                    unsigned long *nr_to_walk)
{
        struct list_lru_one *l;
        struct list_head *item, *n;
        unsigned long isolated = 0;

        l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
        list_for_each_safe(item, n, &l->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, l, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                        /* fall through */
                case LRU_REMOVED:
                        isolated++;
                        nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &l->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }

        return isolated;
}
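
/*
 * Walk one node/memcg list under the node's lru lock, handing at most
 * *@nr_to_walk items to @isolate. Returns the number of items isolated.
 */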
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                  list_lru_walk_cb isolate, void *cb_arg,
                  unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        unsigned long ret;

        spin_lock(&nlru->lock);
        ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
                                  nr_to_walk);
        spin_unlock(&nlru->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
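
/* Same as list_lru_walk_one(), but takes the node lock with IRQs disabled. */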
unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                      list_lru_walk_cb isolate, void *cb_arg,
                      unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        unsigned long ret;

        spin_lock_irq(&nlru->lock);
        ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
                                  nr_to_walk);
        spin_unlock_irq(&nlru->lock);
        return ret;
}
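
/*
 * Walk every list on @nid: first the node's global list, then, if the lru
 * is memcg aware, each per-memcg list, until *@nr_to_walk items have been
 * handed to @isolate.
 */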
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk)
{
        long isolated = 0;
        int memcg_idx;

        isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
                                      nr_to_walk);
        if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx) {
                        struct list_lru_node *nlru = &lru->node[nid];

                        spin_lock(&nlru->lock);
                        isolated += __list_lru_walk_one(nlru, memcg_idx,
                                                        isolate, cb_arg,
                                                        nr_to_walk);
                        spin_unlock(&nlru->lock);

                        if (*nr_to_walk <= 0)
                                break;
                }
        }
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
        INIT_LIST_HEAD(&l->list);
        l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                          int begin, int end)
{
        int i;

        for (i = begin; i < end; i++)
                kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                      int begin, int end)
{
        int i;

        for (i = begin; i < end; i++) {
                struct list_lru_one *l;

                l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
                if (!l)
                        goto fail;

                init_one_lru(l);
                memcg_lrus->lru[i] = l;
        }
        return 0;
fail:
        __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
        return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
        struct list_lru_memcg *memcg_lrus;
        int size = memcg_nr_cache_ids;

        memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
                              size * sizeof(void *), GFP_KERNEL);
        if (!memcg_lrus)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
                kvfree(memcg_lrus);
                return -ENOMEM;
        }
        RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

        return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
        struct list_lru_memcg *memcg_lrus;
        /*
         * This is called when shrinker has already been unregistered,
         * and nobody can use it. So, there is no need to use kvfree_rcu().
         */
        memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
        __memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
        kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
        struct list_lru_memcg *mlru;

        mlru = container_of(head, struct list_lru_memcg, rcu);
        kvfree(mlru);
}
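
/*
 * Grow one node's array of per-memcg lists from @old_size to @new_size
 * slots. A new array is allocated, the old pointers are copied over, and
 * the switch is published with rcu_assign_pointer() under the node lock so
 * that lock-free readers always see a consistent array; the old array is
 * freed after an RCU grace period.
 */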
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
                                      int old_size, int new_size)
{
        struct list_lru_memcg *old, *new;

        BUG_ON(old_size > new_size);

        old = rcu_dereference_protected(nlru->memcg_lrus,
                                        lockdep_is_held(&list_lrus_mutex));
        new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(new, old_size, new_size)) {
                kvfree(new);
                return -ENOMEM;
        }

        memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

        /*
         * The locking below allows readers that hold nlru->lock to avoid
         * taking rcu_read_lock (see list_lru_from_memcg_idx).
         *
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);
        rcu_assign_pointer(nlru->memcg_lrus, new);
        spin_unlock_irq(&nlru->lock);

        call_rcu(&old->rcu, kvfree_rcu);
        return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
                                              int old_size, int new_size)
{
        struct list_lru_memcg *memcg_lrus;

        memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
                                               lockdep_is_held(&list_lrus_mutex));
        /*
         * Do not bother shrinking the array back to the old size, because we
         * cannot handle allocation failures here.
         */
        __memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        int i;

        lru->memcg_aware = memcg_aware;

        if (!memcg_aware)
                return 0;

        for_each_node(i) {
                if (memcg_init_list_lru_node(&lru->node[i]))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;
                memcg_destroy_list_lru_node(&lru->node[i]);
        }
        return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
                                 int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return 0;

        for_each_node(i) {
                if (memcg_update_list_lru_node(&lru->node[i],
                                               old_size, new_size))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;
                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
        }
        return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
                                         int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
}
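
/*
 * Resize the per-memcg arrays of every registered list_lru to @new_size
 * slots, rolling back the lrus already resized if an allocation fails.
 */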
int memcg_update_all_list_lrus(int new_size)
{
        int ret = 0;
        struct list_lru *lru;
        int old_size = memcg_nr_cache_ids;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list) {
                ret = memcg_update_list_lru(lru, old_size, new_size);
                if (ret)
                        goto fail;
        }
out:
        mutex_unlock(&list_lrus_mutex);
        return ret;
fail:
        list_for_each_entry_continue_reverse(lru, &list_lrus, list)
                memcg_cancel_update_list_lru(lru, old_size, new_size);
        goto out;
}
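
/*
 * Splice all items indexed by @src_idx on this node over to @dst_memcg's
 * list, setting the destination's shrinker bit if its list goes from empty
 * to non-empty.
 */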
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
                                      int src_idx, struct mem_cgroup *dst_memcg)
{
        struct list_lru_node *nlru = &lru->node[nid];
        int dst_idx = dst_memcg->kmemcg_id;
        struct list_lru_one *src, *dst;
        bool set;

        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);

        src = list_lru_from_memcg_idx(nlru, src_idx);
        dst = list_lru_from_memcg_idx(nlru, dst_idx);

        list_splice_init(&src->list, &dst->list);
        set = (!dst->nr_items && src->nr_items);
        dst->nr_items += src->nr_items;
        if (set)
                memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
        src->nr_items = 0;

        spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
                                 int src_idx, struct mem_cgroup *dst_memcg)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}
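
/*
 * Reparent the contents of every registered list_lru: move everything that
 * was accounted to the memcg at @src_idx over to @dst_memcg, typically when
 * the source cgroup's kmem id is being retired.
 */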
void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
        struct list_lru *lru;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list)
                memcg_drain_list_lru(lru, src_idx, dst_memcg);
        mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */
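
/*
 * Initialise @lru: allocate the per-node array, set up each node's lock and
 * list and, when @memcg_aware, the per-memcg arrays. @key, if non-NULL,
 * gives the node locks their own lockdep class; @shrinker supplies the id
 * used for the memcg shrinker bitmap. Returns 0 or -ENOMEM.
 */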
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key, struct shrinker *shrinker)
{
        int i;
        size_t size = sizeof(*lru->node) * nr_node_ids;
        int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
        if (shrinker)
                lru->shrinker_id = shrinker->id;
        else
                lru->shrinker_id = -1;
#endif
        memcg_get_cache_ids();

        lru->node = kzalloc(size, GFP_KERNEL);
        if (!lru->node)
                goto out;

        for_each_node(i) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                init_one_lru(&lru->node[i].lru);
        }

        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
                /* Do this so a list_lru_destroy() doesn't crash: */
                lru->node = NULL;
                goto out;
        }

        list_lru_register(lru);
out:
        memcg_put_cache_ids();
        return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
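
/*
 * Tear down everything __list_lru_init() set up. Safe to call on an lru
 * that was never initialised or has already been destroyed.
 */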
void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;

        memcg_get_cache_ids();
        list_lru_unregister(lru);
        memcg_destroy_list_lru(lru);
        kfree(lru->node);
        lru->node = NULL;
#ifdef CONFIG_MEMCG_KMEM
        lru->shrinker_id = -1;
#endif
        memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);