  1. // SPDX-License-Identifier: GPL-2.0
  2. /******************************************************************************
  3. * Xen selfballoon driver (and optional frontswap self-shrinking driver)
  4. *
  5. * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
  6. *
  7. * This code complements the cleancache and frontswap patchsets to optimize
  8. * support for Xen Transcendent Memory ("tmem"). The policy it implements
  9. * is rudimentary and will likely improve over time, but it does work well
  10. * enough today.
  11. *
  12. * Two functionalities are implemented here which both use "control theory"
  13. * (feedback) to optimize memory utilization. In a virtualized environment
  14. * such as Xen, RAM is often a scarce resource and we would like to ensure
  15. * that each of a possibly large number of virtual machines is using RAM
  16. * efficiently, i.e. using as little as possible when under light load
  17. * and obtaining as much as possible when memory demands are high.
  18. * Since RAM needs vary highly dynamically and sometimes dramatically,
  19. * "hysteresis" is used, that is, memory target is determined not just
  20. * on current data but also on past data stored in the system.
  21. *
  22. * "Selfballooning" creates memory pressure by managing the Xen balloon
  23. * driver to decrease and increase available kernel memory, driven
  24. * largely by the target value of "Committed_AS" (see /proc/meminfo).
  25. * Since Committed_AS does not account for clean mapped pages (i.e. pages
  26. * in RAM that are identical to pages on disk), selfballooning has the
  27. * affect of pushing less frequently used clean pagecache pages out of
  28. * kernel RAM and, presumably using cleancache, into Xen tmem where
  29. * Xen can more efficiently optimize RAM utilization for such pages.
  30. *
  31. * When kernel memory demand unexpectedly increases faster than Xen, via
  32. * the selfballoon driver, is able to (or chooses to) provide usable RAM,
  33. * the kernel may invoke swapping. In most cases, frontswap is able
  34. * to absorb this swapping into Xen tmem. However, due to the fact
  35. * that the kernel swap subsystem assumes swapping occurs to a disk,
  36. * swapped pages may sit on the disk for a very long time; even if
  37. * the kernel knows the page will never be used again. This is because
  38. * the disk space costs very little and can be overwritten when
  39. * necessary. When such stale pages are in frontswap, however, they
  40. * are taking up valuable real estate. "Frontswap selfshrinking" works
  41. * to resolve this: When frontswap activity is otherwise stable
  42. * and the guest kernel is not under memory pressure, the "frontswap
  43. * selfshrinking" accounts for this by providing pressure to remove some
  44. * pages from frontswap and return them to kernel memory.
  45. *
  46. * For both "selfballooning" and "frontswap-selfshrinking", a worker
  47. * thread is used and sysfs tunables are provided to adjust the frequency
  48. * and rate of adjustments to achieve the goal, as well as to disable one
  49. * or both functions independently.
  50. *
  51. * While some argue that this functionality can and should be implemented
  52. * in userspace, it has been observed that bad things happen (e.g. OOMs).
  53. *
  54. * System configuration note: Selfballooning should not be enabled on
  55. * systems without a sufficiently large swap device configured; for best
  56. * results, it is recommended that total swap be increased by the size
  57. * of the guest memory. Note, that selfballooning should be disabled by default
  58. * if frontswap is not configured. Similarly selfballooning should be enabled
  59. * by default if frontswap is configured and can be disabled with the
  60. * "tmem.selfballooning=0" kernel boot option. Finally, when frontswap is
  61. * configured, frontswap-selfshrinking can be disabled with the
  62. * "tmem.selfshrink=0" kernel boot option.
  63. *
  64. * Selfballooning is disallowed in domain0 and force-disabled.
  65. *
  66. */
  67. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  68. #include <linux/kernel.h>
  69. #include <linux/bootmem.h>
  70. #include <linux/swap.h>
  71. #include <linux/mm.h>
  72. #include <linux/mman.h>
  73. #include <linux/workqueue.h>
  74. #include <linux/device.h>
  75. #include <xen/balloon.h>
  76. #include <xen/tmem.h>
  77. #include <xen/xen.h>
/* Enable/disable with sysfs; also set at init time from boot parameters. */
static int xen_selfballooning_enabled __read_mostly;

/*
 * Controls rate at which memory target (this iteration) approaches
 * ultimate goal when memory need is increasing (up-hysteresis) or
 * decreasing (down-hysteresis). Higher values of hysteresis cause
 * slower increases/decreases. The default values for the various
 * parameters were deemed reasonable by experimentation, may be
 * workload-dependent, and can all be adjusted via sysfs.
 */
static unsigned int selfballoon_downhysteresis __read_mostly = 8;
static unsigned int selfballoon_uphysteresis __read_mostly = 1;

/* In HZ, controls frequency of worker invocation. */
static unsigned int selfballoon_interval __read_mostly = 5;

/*
 * Minimum usable RAM in MB for selfballooning target for balloon.
 * If non-zero, it is added to totalreserve_pages and self-ballooning
 * will not balloon below the sum. If zero, a piecewise linear function
 * is calculated as a minimum and added to totalreserve_pages. Note that
 * setting this value indiscriminately may cause OOMs and crashes.
 */
static unsigned int selfballoon_min_usable_mb;

/*
 * Amount of RAM in MB to add to the target number of pages.
 * Can be used to reserve some more room for caches and the like.
 */
static unsigned int selfballoon_reserved_mb;

/*
 * The one delayed-work item shared by selfballooning and (optionally)
 * frontswap selfshrinking; it reschedules itself while either feature
 * remains enabled (see selfballoon_process()).
 */
static void selfballoon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>

/* Enable/disable with sysfs; also set at init time from boot parameters. */
static bool frontswap_selfshrinking __read_mostly;

/*
 * The default values for the following parameters were deemed reasonable
 * by experimentation, may be workload-dependent, and can all be
 * adjusted via sysfs.
 */

/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
static unsigned int frontswap_hysteresis __read_mostly = 20;

/*
 * Number of selfballoon worker invocations to wait before observing that
 * frontswap selfshrinking should commence. Note that selfshrinking does
 * not use a separate worker thread.
 */
static unsigned int frontswap_inertia __read_mostly = 3;

/* Countdown to next invocation of frontswap_shrink() */
static unsigned long frontswap_inertia_counter;
  126. /*
  127. * Invoked by the selfballoon worker thread, uses current number of pages
  128. * in frontswap (frontswap_curr_pages()), previous status, and control
  129. * values (hysteresis and inertia) to determine if frontswap should be
  130. * shrunk and what the new frontswap size should be. Note that
  131. * frontswap_shrink is essentially a partial swapoff that immediately
  132. * transfers pages from the "swap device" (frontswap) back into kernel
  133. * RAM; despite the name, frontswap "shrinking" is very different from
  134. * the "shrinker" interface used by the kernel MM subsystem to reclaim
  135. * memory.
  136. */
  137. static void frontswap_selfshrink(void)
  138. {
  139. static unsigned long cur_frontswap_pages;
  140. unsigned long last_frontswap_pages;
  141. unsigned long tgt_frontswap_pages;
  142. last_frontswap_pages = cur_frontswap_pages;
  143. cur_frontswap_pages = frontswap_curr_pages();
  144. if (!cur_frontswap_pages ||
  145. (cur_frontswap_pages > last_frontswap_pages)) {
  146. frontswap_inertia_counter = frontswap_inertia;
  147. return;
  148. }
  149. if (frontswap_inertia_counter && --frontswap_inertia_counter)
  150. return;
  151. if (cur_frontswap_pages <= frontswap_hysteresis)
  152. tgt_frontswap_pages = 0;
  153. else
  154. tgt_frontswap_pages = cur_frontswap_pages -
  155. (cur_frontswap_pages / frontswap_hysteresis);
  156. frontswap_shrink(tgt_frontswap_pages);
  157. frontswap_inertia_counter = frontswap_inertia;
  158. }
  159. #endif /* CONFIG_FRONTSWAP */
/* Convert megabytes <-> pages; PAGE_SHIFT is log2 of the page size. */
#define MB2PAGES(mb)	((mb) << (20 - PAGE_SHIFT))
#define PAGES2MB(pages)	((pages) >> (20 - PAGE_SHIFT))
/*
 * Use current balloon size, the goal (vm_committed_as), and hysteresis
 * parameters to set a new target balloon size.
 *
 * Runs as the delayed-work handler; it reschedules itself every
 * selfballoon_interval seconds for as long as selfballooning or
 * frontswap selfshrinking remains enabled, and stops rescheduling
 * (stopping the worker) once both are disabled.
 */
static void selfballoon_process(struct work_struct *work)
{
	unsigned long cur_pages, goal_pages, tgt_pages, floor_pages;
	unsigned long useful_pages;
	bool reset_timer = false;

	if (xen_selfballooning_enabled) {
		cur_pages = totalram_pages;
		tgt_pages = cur_pages; /* default is no change */
		/*
		 * Goal: committed address space plus the kernel reserve,
		 * padded by the configurable selfballoon_reserved_mb.
		 */
		goal_pages = vm_memory_committed() +
				totalreserve_pages +
				MB2PAGES(selfballoon_reserved_mb);
#ifdef CONFIG_FRONTSWAP
		/* allow space for frontswap pages to be repatriated */
		if (frontswap_selfshrinking)
			goal_pages += frontswap_curr_pages();
#endif
		/*
		 * Move toward the goal by only a fraction of the gap each
		 * pass (hysteresis): 1/downhysteresis when shrinking,
		 * 1/uphysteresis when growing.
		 */
		if (cur_pages > goal_pages)
			tgt_pages = cur_pages -
				((cur_pages - goal_pages) /
				  selfballoon_downhysteresis);
		else if (cur_pages < goal_pages)
			tgt_pages = cur_pages +
				((goal_pages - cur_pages) /
				  selfballoon_uphysteresis);
		/* else if cur_pages == goal_pages, no change */

		/*
		 * Compute a floor below which we never balloon. Either the
		 * admin-supplied minimum, or a piecewise linear function of
		 * the usable (non-reserved) RAM.
		 */
		useful_pages = max_pfn - totalreserve_pages;
		if (selfballoon_min_usable_mb != 0)
			floor_pages = totalreserve_pages +
					MB2PAGES(selfballoon_min_usable_mb);
		/* piecewise linear function ending in ~3% slope */
		else if (useful_pages < MB2PAGES(16))
			floor_pages = max_pfn; /* not worth ballooning */
		else if (useful_pages < MB2PAGES(64))
			floor_pages = totalreserve_pages + MB2PAGES(16) +
					((useful_pages - MB2PAGES(16)) >> 1);
		else if (useful_pages < MB2PAGES(512))
			floor_pages = totalreserve_pages + MB2PAGES(40) +
					((useful_pages - MB2PAGES(40)) >> 3);
		else /* useful_pages >= MB2PAGES(512) */
			floor_pages = totalreserve_pages + MB2PAGES(99) +
					((useful_pages - MB2PAGES(99)) >> 5);
		if (tgt_pages < floor_pages)
			tgt_pages = floor_pages;
		/*
		 * Translate from a totalram_pages-relative target to the
		 * balloon driver's own accounting of current pages.
		 */
		balloon_set_new_target(tgt_pages +
			balloon_stats.current_pages - totalram_pages);
		reset_timer = true;
	}
#ifdef CONFIG_FRONTSWAP
	if (frontswap_selfshrinking) {
		frontswap_selfshrink();
		reset_timer = true;
	}
#endif
	if (reset_timer)
		schedule_delayed_work(&selfballoon_worker,
			selfballoon_interval * HZ);
}
#ifdef CONFIG_SYSFS

#include <linux/capability.h>

/*
 * Generate a sysfs "show" handler printing one value with the given
 * printf format. NOTE(review): several invocations below pass an
 * unsigned int with "%d"; output is identical for values <= INT_MAX,
 * but "%u" would be the exact specifier -- confirm before changing.
 */
#define SELFBALLOON_SHOW(name, format, args...)				\
	static ssize_t show_##name(struct device *dev,	\
					  struct device_attribute *attr, \
					  char *buf) \
	{ \
		return sprintf(buf, format, ##args); \
	}
  232. SELFBALLOON_SHOW(selfballooning, "%d\n", xen_selfballooning_enabled);
  233. static ssize_t store_selfballooning(struct device *dev,
  234. struct device_attribute *attr,
  235. const char *buf,
  236. size_t count)
  237. {
  238. bool was_enabled = xen_selfballooning_enabled;
  239. unsigned long tmp;
  240. int err;
  241. if (!capable(CAP_SYS_ADMIN))
  242. return -EPERM;
  243. err = kstrtoul(buf, 10, &tmp);
  244. if (err)
  245. return err;
  246. if ((tmp != 0) && (tmp != 1))
  247. return -EINVAL;
  248. xen_selfballooning_enabled = !!tmp;
  249. if (!was_enabled && xen_selfballooning_enabled)
  250. schedule_delayed_work(&selfballoon_worker,
  251. selfballoon_interval * HZ);
  252. return count;
  253. }
  254. static DEVICE_ATTR(selfballooning, S_IRUGO | S_IWUSR,
  255. show_selfballooning, store_selfballooning);
  256. SELFBALLOON_SHOW(selfballoon_interval, "%d\n", selfballoon_interval);
  257. static ssize_t store_selfballoon_interval(struct device *dev,
  258. struct device_attribute *attr,
  259. const char *buf,
  260. size_t count)
  261. {
  262. unsigned long val;
  263. int err;
  264. if (!capable(CAP_SYS_ADMIN))
  265. return -EPERM;
  266. err = kstrtoul(buf, 10, &val);
  267. if (err)
  268. return err;
  269. if (val == 0)
  270. return -EINVAL;
  271. selfballoon_interval = val;
  272. return count;
  273. }
  274. static DEVICE_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR,
  275. show_selfballoon_interval, store_selfballoon_interval);
  276. SELFBALLOON_SHOW(selfballoon_downhys, "%d\n", selfballoon_downhysteresis);
  277. static ssize_t store_selfballoon_downhys(struct device *dev,
  278. struct device_attribute *attr,
  279. const char *buf,
  280. size_t count)
  281. {
  282. unsigned long val;
  283. int err;
  284. if (!capable(CAP_SYS_ADMIN))
  285. return -EPERM;
  286. err = kstrtoul(buf, 10, &val);
  287. if (err)
  288. return err;
  289. if (val == 0)
  290. return -EINVAL;
  291. selfballoon_downhysteresis = val;
  292. return count;
  293. }
  294. static DEVICE_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR,
  295. show_selfballoon_downhys, store_selfballoon_downhys);
  296. SELFBALLOON_SHOW(selfballoon_uphys, "%d\n", selfballoon_uphysteresis);
  297. static ssize_t store_selfballoon_uphys(struct device *dev,
  298. struct device_attribute *attr,
  299. const char *buf,
  300. size_t count)
  301. {
  302. unsigned long val;
  303. int err;
  304. if (!capable(CAP_SYS_ADMIN))
  305. return -EPERM;
  306. err = kstrtoul(buf, 10, &val);
  307. if (err)
  308. return err;
  309. if (val == 0)
  310. return -EINVAL;
  311. selfballoon_uphysteresis = val;
  312. return count;
  313. }
  314. static DEVICE_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
  315. show_selfballoon_uphys, store_selfballoon_uphys);
  316. SELFBALLOON_SHOW(selfballoon_min_usable_mb, "%d\n",
  317. selfballoon_min_usable_mb);
  318. static ssize_t store_selfballoon_min_usable_mb(struct device *dev,
  319. struct device_attribute *attr,
  320. const char *buf,
  321. size_t count)
  322. {
  323. unsigned long val;
  324. int err;
  325. if (!capable(CAP_SYS_ADMIN))
  326. return -EPERM;
  327. err = kstrtoul(buf, 10, &val);
  328. if (err)
  329. return err;
  330. if (val == 0)
  331. return -EINVAL;
  332. selfballoon_min_usable_mb = val;
  333. return count;
  334. }
  335. static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
  336. show_selfballoon_min_usable_mb,
  337. store_selfballoon_min_usable_mb);
  338. SELFBALLOON_SHOW(selfballoon_reserved_mb, "%d\n",
  339. selfballoon_reserved_mb);
  340. static ssize_t store_selfballoon_reserved_mb(struct device *dev,
  341. struct device_attribute *attr,
  342. const char *buf,
  343. size_t count)
  344. {
  345. unsigned long val;
  346. int err;
  347. if (!capable(CAP_SYS_ADMIN))
  348. return -EPERM;
  349. err = kstrtoul(buf, 10, &val);
  350. if (err)
  351. return err;
  352. if (val == 0)
  353. return -EINVAL;
  354. selfballoon_reserved_mb = val;
  355. return count;
  356. }
  357. static DEVICE_ATTR(selfballoon_reserved_mb, S_IRUGO | S_IWUSR,
  358. show_selfballoon_reserved_mb,
  359. store_selfballoon_reserved_mb);
  360. #ifdef CONFIG_FRONTSWAP
  361. SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
  362. static ssize_t store_frontswap_selfshrinking(struct device *dev,
  363. struct device_attribute *attr,
  364. const char *buf,
  365. size_t count)
  366. {
  367. bool was_enabled = frontswap_selfshrinking;
  368. unsigned long tmp;
  369. int err;
  370. if (!capable(CAP_SYS_ADMIN))
  371. return -EPERM;
  372. err = kstrtoul(buf, 10, &tmp);
  373. if (err)
  374. return err;
  375. if ((tmp != 0) && (tmp != 1))
  376. return -EINVAL;
  377. frontswap_selfshrinking = !!tmp;
  378. if (!was_enabled && !xen_selfballooning_enabled &&
  379. frontswap_selfshrinking)
  380. schedule_delayed_work(&selfballoon_worker,
  381. selfballoon_interval * HZ);
  382. return count;
  383. }
  384. static DEVICE_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR,
  385. show_frontswap_selfshrinking, store_frontswap_selfshrinking);
  386. SELFBALLOON_SHOW(frontswap_inertia, "%d\n", frontswap_inertia);
  387. static ssize_t store_frontswap_inertia(struct device *dev,
  388. struct device_attribute *attr,
  389. const char *buf,
  390. size_t count)
  391. {
  392. unsigned long val;
  393. int err;
  394. if (!capable(CAP_SYS_ADMIN))
  395. return -EPERM;
  396. err = kstrtoul(buf, 10, &val);
  397. if (err)
  398. return err;
  399. if (val == 0)
  400. return -EINVAL;
  401. frontswap_inertia = val;
  402. frontswap_inertia_counter = val;
  403. return count;
  404. }
  405. static DEVICE_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR,
  406. show_frontswap_inertia, store_frontswap_inertia);
  407. SELFBALLOON_SHOW(frontswap_hysteresis, "%d\n", frontswap_hysteresis);
  408. static ssize_t store_frontswap_hysteresis(struct device *dev,
  409. struct device_attribute *attr,
  410. const char *buf,
  411. size_t count)
  412. {
  413. unsigned long val;
  414. int err;
  415. if (!capable(CAP_SYS_ADMIN))
  416. return -EPERM;
  417. err = kstrtoul(buf, 10, &val);
  418. if (err)
  419. return err;
  420. if (val == 0)
  421. return -EINVAL;
  422. frontswap_hysteresis = val;
  423. return count;
  424. }
  425. static DEVICE_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR,
  426. show_frontswap_hysteresis, store_frontswap_hysteresis);
  427. #endif /* CONFIG_FRONTSWAP */
/*
 * All tunables above, exposed together as a "selfballoon" sysfs group
 * (registered by register_xen_selfballooning()). The frontswap entries
 * exist only when CONFIG_FRONTSWAP is built in.
 */
static struct attribute *selfballoon_attrs[] = {
	&dev_attr_selfballooning.attr,
	&dev_attr_selfballoon_interval.attr,
	&dev_attr_selfballoon_downhysteresis.attr,
	&dev_attr_selfballoon_uphysteresis.attr,
	&dev_attr_selfballoon_min_usable_mb.attr,
	&dev_attr_selfballoon_reserved_mb.attr,
#ifdef CONFIG_FRONTSWAP
	&dev_attr_frontswap_selfshrinking.attr,
	&dev_attr_frontswap_hysteresis.attr,
	&dev_attr_frontswap_inertia.attr,
#endif
	NULL
};

static const struct attribute_group selfballoon_group = {
	.name = "selfballoon",
	.attrs = selfballoon_attrs
};
  446. #endif
  447. int register_xen_selfballooning(struct device *dev)
  448. {
  449. int error = -1;
  450. #ifdef CONFIG_SYSFS
  451. error = sysfs_create_group(&dev->kobj, &selfballoon_group);
  452. #endif
  453. return error;
  454. }
  455. EXPORT_SYMBOL(register_xen_selfballooning);
/*
 * Initialize selfballooning and/or frontswap selfshrinking for a Xen
 * guest and start the shared worker.
 *
 * @use_selfballooning: caller requests selfballooning
 * @use_frontswap_selfshrink: caller requests frontswap selfshrinking
 *
 * Either feature only actually activates when tmem is enabled as well.
 * Returns 0 if at least one feature was activated (worker scheduled),
 * or -ENODEV when not running under Xen, running as domain0 (where
 * selfballooning is disallowed, see header comment), or neither
 * feature ends up enabled.
 */
int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
{
	bool enable = false;
	unsigned long reserve_pages;

	if (!xen_domain())
		return -ENODEV;

	if (xen_initial_domain()) {
		pr_info("Xen selfballooning driver disabled for domain0\n");
		return -ENODEV;
	}

	xen_selfballooning_enabled = tmem_enabled && use_selfballooning;
	if (xen_selfballooning_enabled) {
		pr_info("Initializing Xen selfballooning driver\n");
		enable = true;
	}
#ifdef CONFIG_FRONTSWAP
	frontswap_selfshrinking = tmem_enabled && use_frontswap_selfshrink;
	if (frontswap_selfshrinking) {
		pr_info("Initializing frontswap selfshrinking driver\n");
		enable = true;
	}
#endif
	if (!enable)
		return -ENODEV;

	/*
	 * Give selfballoon_reserved_mb a default value(10% of total ram pages)
	 * to make selfballoon not so aggressive.
	 *
	 * There are mainly two reasons:
	 * 1) The original goal_page didn't consider some pages used by kernel
	 *    space, like slab pages and memory used by device drivers.
	 *
	 * 2) The balloon driver may not give back memory to guest OS fast
	 *    enough when the workload suddenly aquries a lot of physical memory.
	 *
	 * In both cases, the guest OS will suffer from memory pressure and
	 * OOM killer may be triggered.
	 * By reserving extra 10% of total ram pages, we can keep the system
	 * much more reliably and response faster in some cases.
	 */
	if (!selfballoon_reserved_mb) {
		reserve_pages = totalram_pages / 10;
		selfballoon_reserved_mb = PAGES2MB(reserve_pages);
	}
	schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ);

	return 0;
}
EXPORT_SYMBOL(xen_selfballoon_init);