/* vmw_balloon.c — VMware Memory Control (Balloon) Driver */
  1. /*
  2. * VMware Balloon driver.
  3. *
  4. * Copyright (C) 2000-2014, VMware, Inc. All Rights Reserved.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; version 2 of the License and no later version.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  13. * NON INFRINGEMENT. See the GNU General Public License for more
  14. * details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19. *
  20. * Maintained by: Xavier Deguillard <xdeguillard@vmware.com>
  21. * Philip Moltmann <moltmann@vmware.com>
  22. */
  23. /*
  24. * This is VMware physical memory management driver for Linux. The driver
  25. * acts like a "balloon" that can be inflated to reclaim physical pages by
  26. * reserving them in the guest and invalidating them in the monitor,
  27. * freeing up the underlying machine pages so they can be allocated to
  28. * other guests. The balloon can also be deflated to allow the guest to
  29. * use more physical memory. Higher level policies can control the sizes
  30. * of balloons in VMs in order to manage physical memory resources.
  31. */
  32. //#define DEBUG
  33. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  34. #include <linux/types.h>
  35. #include <linux/kernel.h>
  36. #include <linux/mm.h>
  37. #include <linux/vmalloc.h>
  38. #include <linux/sched.h>
  39. #include <linux/module.h>
  40. #include <linux/workqueue.h>
  41. #include <linux/debugfs.h>
  42. #include <linux/seq_file.h>
  43. #include <linux/vmw_vmci_defs.h>
  44. #include <linux/vmw_vmci_api.h>
  45. #include <asm/hypervisor.h>
/* Module metadata; the dmi alias autoloads the driver on VMware platforms. */
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
/*
 * Various constants controlling rate of inflating/deflating balloon,
 * measured in pages.
 */

/*
 * Rates of memory allocation when guest experiences memory pressure
 * (driver performs sleeping allocations).
 */
#define VMW_BALLOON_RATE_ALLOC_MIN	512U
#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
#define VMW_BALLOON_RATE_ALLOC_INC	16U

/*
 * When guest is under memory pressure, use a reduced page allocation
 * rate for next several cycles.
 */
#define VMW_BALLOON_SLOW_CYCLES		4

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
 * __GFP_NOWARN, to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
};

#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

/* A "2m page" is an order-9 allocation: 512 contiguous 4k pages. */
#define VMW_BALLOON_2M_SHIFT		(9)
/* Index 0 == 4k pages, index 1 == 2m pages, in all per-size arrays below. */
#define VMW_BALLOON_NUM_PAGE_SIZES	(2)

/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available,
 *
 * VMW_BALLOON_BASIC_CMDS:
 *	LOCK and UNLOCK commands,
 * VMW_BALLOON_BATCHED_CMDS:
 *	BATCHED_LOCK and BATCHED_UNLOCK commands.
 * VMW_BALLOON_BATCHED_2M_CMDS:
 *	BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands,
 * VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
 *	VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
 */
#define VMW_BALLOON_CMD_START			0
#define VMW_BALLOON_CMD_GET_TARGET		1
#define VMW_BALLOON_CMD_LOCK			2
#define VMW_BALLOON_CMD_UNLOCK			3
#define VMW_BALLOON_CMD_GUEST_ID		4
#define VMW_BALLOON_CMD_BATCHED_LOCK		6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK		7
#define VMW_BALLOON_CMD_BATCHED_2M_LOCK		8
#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK	9
#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET	10

/* error codes */
#define VMW_BALLOON_SUCCESS			0
#define VMW_BALLOON_FAILURE			-1
#define VMW_BALLOON_ERROR_CMD_INVALID		1
#define VMW_BALLOON_ERROR_PPN_INVALID		2
#define VMW_BALLOON_ERROR_PPN_LOCKED		3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED		4
#define VMW_BALLOON_ERROR_PPN_PINNED		5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED		6
#define VMW_BALLOON_ERROR_RESET			7
#define VMW_BALLOON_ERROR_BUSY			8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 * +-------------+----------+--------+
 * |             |          |        |
 * | Page number | Reserved | Status |
 * |             |          |        |
 * +-------------+----------+--------+
 * 64  PAGE_SHIFT          6         0
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES	(PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK	((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

/* One guest page holding an array of batch entries (see layout above). */
struct vmballoon_batch_page {
	u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};
  160. static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
  161. {
  162. return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
  163. }
  164. static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
  165. int idx)
  166. {
  167. return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
  168. }
  169. static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
  170. u64 pa)
  171. {
  172. batch->pages[idx] = pa;
  173. }
/*
 * Issue one balloon backdoor command via "inl" on the hypervisor port.
 * Register protocol (from the asm constraints below): eax carries the
 * magic in and the status out; ebx carries arg1 in and the result out;
 * ecx carries the command; edx the port; esi arg2. For the START
 * command the host returns its capabilities in ecx instead, hence the
 * result fix-up after the asm. The macro's value is the status;
 * "result" is written as a side effect.
 */
#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result)		\
({								\
	unsigned long __status, __dummy1, __dummy2, __dummy3;	\
	__asm__ __volatile__ ("inl %%dx" :			\
		"=a"(__status),					\
		"=c"(__dummy1),					\
		"=d"(__dummy2),					\
		"=b"(result),					\
		"=S" (__dummy3) :				\
		"0"(VMW_BALLOON_HV_MAGIC),			\
		"1"(VMW_BALLOON_CMD_##cmd),			\
		"2"(VMW_BALLOON_HV_PORT),			\
		"3"(arg1),					\
		"4" (arg2) :					\
		"memory");					\
	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)	\
		result = __dummy1;				\
	result &= -1UL;						\
	__status & -1UL;					\
})
#ifdef CONFIG_DEBUG_FS
/*
 * Event counters kept only when debugfs is enabled. Arrays indexed by
 * page size use index 0 for 4k pages and index 1 for 2m pages.
 */
struct vmballoon_stats {
	unsigned int timer;
	unsigned int doorbell;

	/* allocation statistics */
	unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
	unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];

	/* monitor operations */
	unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int target;
	unsigned int target_fail;
	unsigned int start;
	unsigned int start_fail;
	unsigned int guest_type;
	unsigned int guest_type_fail;
	unsigned int doorbell_set;
	unsigned int doorbell_unset;
};

/* STATS_INC compiles to nothing when debugfs support is disabled. */
#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif
struct vmballoon;

/*
 * Operations that differ between the batched and the legacy
 * (one-page-per-hypervisor-call) modes of talking to the host.
 */
struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
	int (*lock)(struct vmballoon *b, unsigned int num_pages,
			bool is_2m_pages, unsigned int *target);
	int (*unlock)(struct vmballoon *b, unsigned int num_pages,
			bool is_2m_pages, unsigned int *target);
};

/* Per-page-size bookkeeping (one instance for 4k, one for 2m pages). */
struct vmballoon_page_size {
	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
	unsigned int n_refused_pages;
};

struct vmballoon {
	struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];

	/* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
	unsigned supported_page_sizes;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

	/* adjustment rates (pages per second) */
	unsigned int rate_alloc;

	/* slowdown page allocations for next few cycles */
	unsigned int slow_allocation_cycles;

	/* capability bits negotiated with the host at START time */
	unsigned long capabilities;

	/* shared page used to pass page-frame batches to the hypervisor */
	struct vmballoon_batch_page *batch_page;
	unsigned int batch_max_pages;
	/* page staged for the next call in non-batching mode */
	struct page *page;

	const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;

	struct vmci_handle vmci_doorbell;
};

/* Single balloon instance for the whole guest. */
static struct vmballoon balloon;
  268. /*
  269. * Send "start" command to the host, communicating supported version
  270. * of the protocol.
  271. */
  272. static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
  273. {
  274. unsigned long status, capabilities, dummy = 0;
  275. bool success;
  276. STATS_INC(b->stats.start);
  277. status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);
  278. switch (status) {
  279. case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
  280. b->capabilities = capabilities;
  281. success = true;
  282. break;
  283. case VMW_BALLOON_SUCCESS:
  284. b->capabilities = VMW_BALLOON_BASIC_CMDS;
  285. success = true;
  286. break;
  287. default:
  288. success = false;
  289. }
  290. if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
  291. b->supported_page_sizes = 2;
  292. else
  293. b->supported_page_sizes = 1;
  294. if (!success) {
  295. pr_debug("%s - failed, hv returns %ld\n", __func__, status);
  296. STATS_INC(b->stats.start_fail);
  297. }
  298. return success;
  299. }
  300. static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
  301. {
  302. switch (status) {
  303. case VMW_BALLOON_SUCCESS:
  304. return true;
  305. case VMW_BALLOON_ERROR_RESET:
  306. b->reset_required = true;
  307. /* fall through */
  308. default:
  309. return false;
  310. }
  311. }
  312. /*
  313. * Communicate guest type to the host so that it can adjust ballooning
  314. * algorithm to the one most appropriate for the guest. This command
  315. * is normally issued after sending "start" command and is part of
  316. * standard reset sequence.
  317. */
  318. static bool vmballoon_send_guest_id(struct vmballoon *b)
  319. {
  320. unsigned long status, dummy = 0;
  321. status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
  322. dummy);
  323. STATS_INC(b->stats.guest_type);
  324. if (vmballoon_check_status(b, status))
  325. return true;
  326. pr_debug("%s - failed, hv returns %ld\n", __func__, status);
  327. STATS_INC(b->stats.guest_type_fail);
  328. return false;
  329. }
  330. static u16 vmballoon_page_size(bool is_2m_page)
  331. {
  332. if (is_2m_page)
  333. return 1 << VMW_BALLOON_2M_SHIFT;
  334. return 1;
  335. }
  336. /*
  337. * Retrieve desired balloon size from the host.
  338. */
  339. static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
  340. {
  341. unsigned long status;
  342. unsigned long target;
  343. unsigned long limit;
  344. unsigned long dummy = 0;
  345. u32 limit32;
  346. /*
  347. * si_meminfo() is cheap. Moreover, we want to provide dynamic
  348. * max balloon size later. So let us call si_meminfo() every
  349. * iteration.
  350. */
  351. si_meminfo(&b->sysinfo);
  352. limit = b->sysinfo.totalram;
  353. /* Ensure limit fits in 32-bits */
  354. limit32 = (u32)limit;
  355. if (limit != limit32)
  356. return false;
  357. /* update stats */
  358. STATS_INC(b->stats.target);
  359. status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
  360. if (vmballoon_check_status(b, status)) {
  361. *new_target = target;
  362. return true;
  363. }
  364. pr_debug("%s - failed, hv returns %ld\n", __func__, status);
  365. STATS_INC(b->stats.target_fail);
  366. return false;
  367. }
  368. /*
  369. * Notify the host about allocated page so that host can use it without
  370. * fear that guest will need it. Host may reject some pages, we need to
  371. * check the return value and maybe submit a different page.
  372. */
  373. static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
  374. unsigned int *hv_status, unsigned int *target)
  375. {
  376. unsigned long status, dummy = 0;
  377. u32 pfn32;
  378. pfn32 = (u32)pfn;
  379. if (pfn32 != pfn)
  380. return -1;
  381. STATS_INC(b->stats.lock[false]);
  382. *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
  383. if (vmballoon_check_status(b, status))
  384. return 0;
  385. pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
  386. STATS_INC(b->stats.lock_fail[false]);
  387. return 1;
  388. }
  389. static int vmballoon_send_batched_lock(struct vmballoon *b,
  390. unsigned int num_pages, bool is_2m_pages, unsigned int *target)
  391. {
  392. unsigned long status;
  393. unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
  394. STATS_INC(b->stats.lock[is_2m_pages]);
  395. if (is_2m_pages)
  396. status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
  397. *target);
  398. else
  399. status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
  400. *target);
  401. if (vmballoon_check_status(b, status))
  402. return 0;
  403. pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
  404. STATS_INC(b->stats.lock_fail[is_2m_pages]);
  405. return 1;
  406. }
  407. /*
  408. * Notify the host that guest intends to release given page back into
  409. * the pool of available (to the guest) pages.
  410. */
  411. static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
  412. unsigned int *target)
  413. {
  414. unsigned long status, dummy = 0;
  415. u32 pfn32;
  416. pfn32 = (u32)pfn;
  417. if (pfn32 != pfn)
  418. return false;
  419. STATS_INC(b->stats.unlock[false]);
  420. status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
  421. if (vmballoon_check_status(b, status))
  422. return true;
  423. pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
  424. STATS_INC(b->stats.unlock_fail[false]);
  425. return false;
  426. }
  427. static bool vmballoon_send_batched_unlock(struct vmballoon *b,
  428. unsigned int num_pages, bool is_2m_pages, unsigned int *target)
  429. {
  430. unsigned long status;
  431. unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
  432. STATS_INC(b->stats.unlock[is_2m_pages]);
  433. if (is_2m_pages)
  434. status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
  435. *target);
  436. else
  437. status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
  438. *target);
  439. if (vmballoon_check_status(b, status))
  440. return true;
  441. pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
  442. STATS_INC(b->stats.unlock_fail[is_2m_pages]);
  443. return false;
  444. }
  445. static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
  446. {
  447. if (is_2m_page)
  448. return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);
  449. return alloc_page(flags);
  450. }
  451. static void vmballoon_free_page(struct page *page, bool is_2m_page)
  452. {
  453. if (is_2m_page)
  454. __free_pages(page, VMW_BALLOON_2M_SHIFT);
  455. else
  456. __free_page(page);
  457. }
/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when host decides to "reset" balloon for one reason or another.
 * Unlike normal "deflate" we do not (shall not) notify host of the pages
 * being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned is_2m_pages;

	/* Walk both per-size lists (index 0 = 4k pages, 1 = 2m pages). */
	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];
		u16 size_per_page = vmballoon_page_size(is_2m_pages);

		/* _safe variant: entries are removed while iterating. */
		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			list_del(&page->lru);
			vmballoon_free_page(page, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);
			/* b->size is accounted in 4k-page units */
			b->size -= size_per_page;
			cond_resched();
		}
	}

	/* Clearing the batch_page unconditionally has no adverse effect */
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
}
/*
 * Notify the host of a ballooned page. If host rejects the page put it on the
 * refuse list, those refused page are then released at the end of the
 * inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
				bool is_2m_pages, unsigned int *target)
{
	int locked, hv_status;
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */

	/*
	 * NOTE(review): a -1 return (pfn does not fit 32 bits) falls through
	 * to the success path below and the page is tracked as ballooned
	 * without the host having locked it — confirm this is intended.
	 */
	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
								target);
	if (locked > 0) {
		STATS_INC(b->stats.refused_alloc[false]);

		/*
		 * On reset or "not needed" there is no point retrying this
		 * page later; give it straight back to the kernel.
		 */
		if (hv_status == VMW_BALLOON_ERROR_RESET ||
				hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
			vmballoon_free_page(page, false);
			return -EIO;
		}

		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
		if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
			page_size->n_refused_pages++;
			list_add(&page->lru, &page_size->refused_pages);
		} else {
			vmballoon_free_page(page, false);
		}
		return -EIO;
	}

	/* track allocated page */
	list_add(&page->lru, &page_size->pages);

	/* update balloon size */
	b->size++;

	return 0;
}
/*
 * Batched counterpart of vmballoon_lock_page(): one hypervisor call
 * covers all staged pages; per-page results are read back from the
 * shared batch page.
 */
static int vmballoon_lock_batched_page(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	int locked, i;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
			target);
	if (locked > 0) {
		/* Whole batch failed: give every staged page back. */
		for (i = 0; i < num_pages; i++) {
			u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
			struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

			vmballoon_free_page(p, is_2m_pages);
		}

		return -EIO;
	}

	/* Command succeeded; now check each page's individual status. */
	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);

		switch (locked) {
		case VMW_BALLOON_SUCCESS:
			/* page is ballooned; account for it */
			list_add(&p->lru, &page_size->pages);
			b->size += size_per_page;
			break;
		case VMW_BALLOON_ERROR_PPN_PINNED:
		case VMW_BALLOON_ERROR_PPN_INVALID:
			/* possibly transient: park for a later retry, unless full */
			if (page_size->n_refused_pages
					< VMW_BALLOON_MAX_REFUSED) {
				list_add(&p->lru, &page_size->refused_pages);
				page_size->n_refused_pages++;
				break;
			}
			/* Fallthrough */
		case VMW_BALLOON_ERROR_RESET:
		case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
			/* hard failure (or refused list full): free the page */
			vmballoon_free_page(p, is_2m_pages);
			break;
		default:
			/* This should never happen */
			WARN_ON_ONCE(true);
		}
	}

	return 0;
}
  571. /*
  572. * Release the page allocated for the balloon. Note that we first notify
  573. * the host so it can make sure the page will be available for the guest
  574. * to use, if needed.
  575. */
  576. static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
  577. bool is_2m_pages, unsigned int *target)
  578. {
  579. struct page *page = b->page;
  580. struct vmballoon_page_size *page_size = &b->page_sizes[false];
  581. /* is_2m_pages can never happen as 2m pages support implies batching */
  582. if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
  583. list_add(&page->lru, &page_size->pages);
  584. return -EIO;
  585. }
  586. /* deallocate page */
  587. vmballoon_free_page(page, false);
  588. STATS_INC(b->stats.free[false]);
  589. /* update balloon size */
  590. b->size--;
  591. return 0;
  592. }
/*
 * Batched counterpart of vmballoon_unlock_page(): one hypervisor call
 * releases all staged pages; pages the host failed to unlock stay owned
 * by the balloon.
 */
static int vmballoon_unlock_batched_page(struct vmballoon *b,
				unsigned int num_pages, bool is_2m_pages,
				unsigned int *target)
{
	int locked, i, ret = 0;
	bool hv_success;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages,
			target);
	if (!hv_success)
		ret = -EIO;

	/* Walk every entry even on failure, to re-own the pages. */
	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);
		if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
			/*
			 * That page wasn't successfully unlocked by the
			 * hypervisor, re-add it to the list of pages owned by
			 * the balloon driver.
			 */
			list_add(&p->lru, &page_size->pages);
		} else {
			/* deallocate page */
			vmballoon_free_page(p, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);

			/* update balloon size */
			b->size -= size_per_page;
		}
	}

	return ret;
}
  627. /*
  628. * Release pages that were allocated while attempting to inflate the
  629. * balloon but were refused by the host for one reason or another.
  630. */
  631. static void vmballoon_release_refused_pages(struct vmballoon *b,
  632. bool is_2m_pages)
  633. {
  634. struct page *page, *next;
  635. struct vmballoon_page_size *page_size =
  636. &b->page_sizes[is_2m_pages];
  637. list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
  638. list_del(&page->lru);
  639. vmballoon_free_page(page, is_2m_pages);
  640. STATS_INC(b->stats.refused_free[is_2m_pages]);
  641. }
  642. page_size->n_refused_pages = 0;
  643. }
  644. static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
  645. {
  646. b->page = p;
  647. }
  648. static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
  649. struct page *p)
  650. {
  651. vmballoon_batch_set_pa(b->batch_page, idx,
  652. (u64)page_to_pfn(p) << PAGE_SHIFT);
  653. }
  654. /*
  655. * Inflate the balloon towards its target size. Note that we try to limit
  656. * the rate of allocation to make sure we are not choking the rest of the
  657. * system.
  658. */
  659. static void vmballoon_inflate(struct vmballoon *b)
  660. {
  661. unsigned rate;
  662. unsigned int allocations = 0;
  663. unsigned int num_pages = 0;
  664. int error = 0;
  665. gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
  666. bool is_2m_pages;
  667. pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
  668. /*
  669. * First try NOSLEEP page allocations to inflate balloon.
  670. *
  671. * If we do not throttle nosleep allocations, we can drain all
  672. * free pages in the guest quickly (if the balloon target is high).
  673. * As a side-effect, draining free pages helps to inform (force)
  674. * the guest to start swapping if balloon target is not met yet,
  675. * which is a desired behavior. However, balloon driver can consume
  676. * all available CPU cycles if too many pages are allocated in a
  677. * second. Therefore, we throttle nosleep allocations even when
  678. * the guest is not under memory pressure. OTOH, if we have already
  679. * predicted that the guest is under memory pressure, then we
  680. * slowdown page allocations considerably.
  681. */
  682. /*
  683. * Start with no sleep allocation rate which may be higher
  684. * than sleeping allocation rate.
  685. */
  686. if (b->slow_allocation_cycles) {
  687. rate = b->rate_alloc;
  688. is_2m_pages = false;
  689. } else {
  690. rate = UINT_MAX;
  691. is_2m_pages =
  692. b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
  693. }
  694. pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n",
  695. __func__, b->target - b->size, rate, b->rate_alloc);
  696. while (!b->reset_required &&
  697. b->size + num_pages * vmballoon_page_size(is_2m_pages)
  698. < b->target) {
  699. struct page *page;
  700. if (flags == VMW_PAGE_ALLOC_NOSLEEP)
  701. STATS_INC(b->stats.alloc[is_2m_pages]);
  702. else
  703. STATS_INC(b->stats.sleep_alloc);
  704. page = vmballoon_alloc_page(flags, is_2m_pages);
  705. if (!page) {
  706. STATS_INC(b->stats.alloc_fail[is_2m_pages]);
  707. if (is_2m_pages) {
  708. b->ops->lock(b, num_pages, true, &b->target);
  709. /*
  710. * ignore errors from locking as we now switch
  711. * to 4k pages and we might get different
  712. * errors.
  713. */
  714. num_pages = 0;
  715. is_2m_pages = false;
  716. continue;
  717. }
  718. if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
  719. /*
  720. * CANSLEEP page allocation failed, so guest
  721. * is under severe memory pressure. Quickly
  722. * decrease allocation rate.
  723. */
  724. b->rate_alloc = max(b->rate_alloc / 2,
  725. VMW_BALLOON_RATE_ALLOC_MIN);
  726. STATS_INC(b->stats.sleep_alloc_fail);
  727. break;
  728. }
  729. /*
  730. * NOSLEEP page allocation failed, so the guest is
  731. * under memory pressure. Let us slow down page
  732. * allocations for next few cycles so that the guest
  733. * gets out of memory pressure. Also, if we already
  734. * allocated b->rate_alloc pages, let's pause,
  735. * otherwise switch to sleeping allocations.
  736. */
  737. b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
  738. if (allocations >= b->rate_alloc)
  739. break;
  740. flags = VMW_PAGE_ALLOC_CANSLEEP;
  741. /* Lower rate for sleeping allocations. */
  742. rate = b->rate_alloc;
  743. continue;
  744. }
  745. b->ops->add_page(b, num_pages++, page);
  746. if (num_pages == b->batch_max_pages) {
  747. error = b->ops->lock(b, num_pages, is_2m_pages,
  748. &b->target);
  749. num_pages = 0;
  750. if (error)
  751. break;
  752. }
  753. cond_resched();
  754. if (allocations >= rate) {
  755. /* We allocated enough pages, let's take a break. */
  756. break;
  757. }
  758. }
  759. if (num_pages > 0)
  760. b->ops->lock(b, num_pages, is_2m_pages, &b->target);
  761. /*
  762. * We reached our goal without failures so try increasing
  763. * allocation rate.
  764. */
  765. if (error == 0 && allocations >= b->rate_alloc) {
  766. unsigned int mult = allocations / b->rate_alloc;
  767. b->rate_alloc =
  768. min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
  769. VMW_BALLOON_RATE_ALLOC_MAX);
  770. }
  771. vmballoon_release_refused_pages(b, true);
  772. vmballoon_release_refused_pages(b, false);
  773. }
  774. /*
  775. * Decrease the size of the balloon allowing guest to use more memory.
  776. */
  777. static void vmballoon_deflate(struct vmballoon *b)
  778. {
  779. unsigned is_2m_pages;
  780. pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
  781. /* free pages to reach target */
  782. for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
  783. is_2m_pages++) {
  784. struct page *page, *next;
  785. unsigned int num_pages = 0;
  786. struct vmballoon_page_size *page_size =
  787. &b->page_sizes[is_2m_pages];
  788. list_for_each_entry_safe(page, next, &page_size->pages, lru) {
  789. if (b->reset_required ||
  790. (b->target > 0 &&
  791. b->size - num_pages
  792. * vmballoon_page_size(is_2m_pages)
  793. < b->target + vmballoon_page_size(true)))
  794. break;
  795. list_del(&page->lru);
  796. b->ops->add_page(b, num_pages++, page);
  797. if (num_pages == b->batch_max_pages) {
  798. int error;
  799. error = b->ops->unlock(b, num_pages,
  800. is_2m_pages, &b->target);
  801. num_pages = 0;
  802. if (error)
  803. return;
  804. }
  805. cond_resched();
  806. }
  807. if (num_pages > 0)
  808. b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
  809. }
  810. }
/* Ops used when the host only supports one-page-at-a-time commands. */
static const struct vmballoon_ops vmballoon_basic_ops = {
	.add_page = vmballoon_add_page,
	.lock = vmballoon_lock_page,
	.unlock = vmballoon_unlock_page
};
/* Ops used when the host supports batched lock/unlock commands. */
static const struct vmballoon_ops vmballoon_batched_ops = {
	.add_page = vmballoon_add_batched_page,
	.lock = vmballoon_lock_batched_page,
	.unlock = vmballoon_unlock_batched_page
};
  821. static bool vmballoon_init_batching(struct vmballoon *b)
  822. {
  823. struct page *page;
  824. page = alloc_page(GFP_KERNEL | __GFP_ZERO);
  825. if (!page)
  826. return false;
  827. b->batch_page = page_address(page);
  828. return true;
  829. }
  830. /*
  831. * Receive notification and resize balloon
  832. */
  833. static void vmballoon_doorbell(void *client_data)
  834. {
  835. struct vmballoon *b = client_data;
  836. STATS_INC(b->stats.doorbell);
  837. mod_delayed_work(system_freezable_wq, &b->dwork, 0);
  838. }
  839. /*
  840. * Clean up vmci doorbell
  841. */
  842. static void vmballoon_vmci_cleanup(struct vmballoon *b)
  843. {
  844. int error;
  845. VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
  846. VMCI_INVALID_ID, error);
  847. STATS_INC(b->stats.doorbell_unset);
  848. if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
  849. vmci_doorbell_destroy(b->vmci_doorbell);
  850. b->vmci_doorbell = VMCI_INVALID_HANDLE;
  851. }
  852. }
  853. /*
  854. * Initialize vmci doorbell, to get notified as soon as balloon changes
  855. */
  856. static int vmballoon_vmci_init(struct vmballoon *b)
  857. {
  858. int error = 0;
  859. if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
  860. error = vmci_doorbell_create(&b->vmci_doorbell,
  861. VMCI_FLAG_DELAYED_CB,
  862. VMCI_PRIVILEGE_FLAG_RESTRICTED,
  863. vmballoon_doorbell, b);
  864. if (error == VMCI_SUCCESS) {
  865. VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
  866. b->vmci_doorbell.context,
  867. b->vmci_doorbell.resource, error);
  868. STATS_INC(b->stats.doorbell_set);
  869. }
  870. }
  871. if (error != 0) {
  872. vmballoon_vmci_cleanup(b);
  873. return -EIO;
  874. }
  875. return 0;
  876. }
  877. /*
  878. * Perform standard reset sequence by popping the balloon (in case it
  879. * is not empty) and then restarting protocol. This operation normally
  880. * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
  881. */
  882. static void vmballoon_reset(struct vmballoon *b)
  883. {
  884. int error;
  885. vmballoon_vmci_cleanup(b);
  886. /* free all pages, skipping monitor unlock */
  887. vmballoon_pop(b);
  888. if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
  889. return;
  890. if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
  891. b->ops = &vmballoon_batched_ops;
  892. b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
  893. if (!vmballoon_init_batching(b)) {
  894. /*
  895. * We failed to initialize batching, inform the monitor
  896. * about it by sending a null capability.
  897. *
  898. * The guest will retry in one second.
  899. */
  900. vmballoon_send_start(b, 0);
  901. return;
  902. }
  903. } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
  904. b->ops = &vmballoon_basic_ops;
  905. b->batch_max_pages = 1;
  906. }
  907. b->reset_required = false;
  908. error = vmballoon_vmci_init(b);
  909. if (error)
  910. pr_err("failed to initialize vmci doorbell\n");
  911. if (!vmballoon_send_guest_id(b))
  912. pr_err("failed to send guest ID to the host\n");
  913. }
  914. /*
  915. * Balloon work function: reset protocol, if needed, get the new size and
  916. * adjust balloon as needed. Repeat in 1 sec.
  917. */
  918. static void vmballoon_work(struct work_struct *work)
  919. {
  920. struct delayed_work *dwork = to_delayed_work(work);
  921. struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
  922. unsigned int target;
  923. STATS_INC(b->stats.timer);
  924. if (b->reset_required)
  925. vmballoon_reset(b);
  926. if (b->slow_allocation_cycles > 0)
  927. b->slow_allocation_cycles--;
  928. if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
  929. /* update target, adjust size */
  930. b->target = target;
  931. if (b->size < target)
  932. vmballoon_inflate(b);
  933. else if (target == 0 ||
  934. b->size > target + vmballoon_page_size(true))
  935. vmballoon_deflate(b);
  936. }
  937. /*
  938. * We are using a freezable workqueue so that balloon operations are
  939. * stopped while the system transitions to/from sleep/hibernation.
  940. */
  941. queue_delayed_work(system_freezable_wq,
  942. dwork, round_jiffies_relative(HZ));
  943. }
  944. /*
  945. * DEBUGFS Interface
  946. */
  947. #ifdef CONFIG_DEBUG_FS
/*
 * seq_file show callback for the "vmmemctl" debugfs entry: dumps the
 * negotiated capabilities, current/target size, allocation rate and the
 * full statistics counters.  Always returns 0.
 */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;

	/* format capabilities info */
	seq_printf(f,
		"balloon capabilities: %#4x\n"
		"used capabilities: %#4lx\n"
		"is resetting: %c\n",
		VMW_BALLOON_CAPABILITIES, b->capabilities,
		b->reset_required ? 'y' : 'n');

	/* format size info */
	seq_printf(f,
		"target: %8d pages\n"
		"current: %8d pages\n",
		b->target, b->size);

	/* format rate info */
	seq_printf(f,
		"rateSleepAlloc: %8d pages/sec\n",
		b->rate_alloc);

	/*
	 * Stats counters.  Arrays indexed by page size: [true] = 2M pages,
	 * [false] = 4K pages (matches the is_2m_pages flag used elsewhere).
	 */
	seq_printf(f,
		"\n"
		"timer: %8u\n"
		"doorbell: %8u\n"
		"start: %8u (%4u failed)\n"
		"guestType: %8u (%4u failed)\n"
		"2m-lock: %8u (%4u failed)\n"
		"lock: %8u (%4u failed)\n"
		"2m-unlock: %8u (%4u failed)\n"
		"unlock: %8u (%4u failed)\n"
		"target: %8u (%4u failed)\n"
		"prim2mAlloc: %8u (%4u failed)\n"
		"primNoSleepAlloc: %8u (%4u failed)\n"
		"primCanSleepAlloc: %8u (%4u failed)\n"
		"prim2mFree: %8u\n"
		"primFree: %8u\n"
		"err2mAlloc: %8u\n"
		"errAlloc: %8u\n"
		"err2mFree: %8u\n"
		"errFree: %8u\n"
		"doorbellSet: %8u\n"
		"doorbellUnset: %8u\n",
		stats->timer,
		stats->doorbell,
		stats->start, stats->start_fail,
		stats->guest_type, stats->guest_type_fail,
		stats->lock[true], stats->lock_fail[true],
		stats->lock[false], stats->lock_fail[false],
		stats->unlock[true], stats->unlock_fail[true],
		stats->unlock[false], stats->unlock_fail[false],
		stats->target, stats->target_fail,
		stats->alloc[true], stats->alloc_fail[true],
		stats->alloc[false], stats->alloc_fail[false],
		stats->sleep_alloc, stats->sleep_alloc_fail,
		stats->free[true],
		stats->free[false],
		stats->refused_alloc[true], stats->refused_alloc[false],
		stats->refused_free[true], stats->refused_free[false],
		stats->doorbell_set, stats->doorbell_unset);

	return 0;
}
/* debugfs open: bind the balloon (stashed in i_private) to the seq_file. */
static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}
/* File operations for the read-only "vmmemctl" debugfs entry. */
static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  1020. static int __init vmballoon_debugfs_init(struct vmballoon *b)
  1021. {
  1022. int error;
  1023. b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
  1024. &vmballoon_debug_fops);
  1025. if (IS_ERR(b->dbg_entry)) {
  1026. error = PTR_ERR(b->dbg_entry);
  1027. pr_err("failed to create debugfs entry, error: %d\n", error);
  1028. return error;
  1029. }
  1030. return 0;
  1031. }
/* Remove the "vmmemctl" debugfs entry created at init. */
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}
  1036. #else
/* CONFIG_DEBUG_FS=n stub: nothing to create, always succeeds. */
static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}
/* CONFIG_DEBUG_FS=n stub: nothing to remove. */
static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}
  1044. #endif /* CONFIG_DEBUG_FS */
/*
 * Module init: bail out unless running on VMware's hypervisor, then set
 * up the page lists, the debugfs entry and the delayed worker.  The
 * first worker run (queued immediately) performs the protocol reset and
 * capability negotiation, since reset_required starts out true.
 */
static int __init vmballoon_init(void)
{
	int error;
	unsigned is_2m_pages;
	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper != &x86_hyper_vmware)
		return -ENODEV;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
	}

	/* initialize rates */
	balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	/* Forces the first worker run to negotiate with the host. */
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}
  1073. module_init(vmballoon_init);
/*
 * Module exit: tear down the doorbell, stop the worker, remove debugfs,
 * then reset the host connection and release all ballooned pages.
 */
static void __exit vmballoon_exit(void)
{
	vmballoon_vmci_cleanup(&balloon);
	/* Worker must be stopped before popping, so it cannot re-inflate. */
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);
}
  1087. module_exit(vmballoon_exit);