/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>
/*
 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
 *			  page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page
 *
 * Driver must call it to properly allocate a new enlisted balloon page
 * before definitively removing it from the guest system.
 * This function returns the page address for the recently enqueued page or
 * NULL in the case we fail to allocate a new page this turn.
 */
struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
{
	unsigned long flags;
	/*
	 * __GFP_NOMEMALLOC | __GFP_NORETRY: do not dip into emergency
	 * reserves and give up quickly — a failed inflate round is
	 * recoverable (we simply return NULL).
	 */
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
				       __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!page)
		return NULL;

	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point.
	 */
	BUG_ON(!trylock_page(page));
	/* IRQ-saving lock: pages_lock is also taken in the other list ops. */
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, page);
	__count_vm_event(BALLOON_INFLATE);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	unlock_page(page);
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *			  its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call it to properly de-allocate a previously enlisted balloon
 * page before definitively releasing it back to the guest system.
 * This function returns the page address for the recently dequeued page or
 * NULL in the case we find balloon's page list temporarily empty due to
 * compaction isolated pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	struct page *page, *tmp;
	unsigned long flags;
	bool dequeued_page;

	dequeued_page = false;
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/*
		 * Block others from accessing the 'page' while we get around
		 * establishing additional references and preparing the 'page'
		 * to be released by the balloon driver.
		 */
		if (trylock_page(page)) {
#ifdef CONFIG_BALLOON_COMPACTION
			if (PageIsolated(page)) {
				/* raced with isolation — skip this page */
				unlock_page(page);
				continue;
			}
#endif
			balloon_page_delete(page);
			__count_vm_event(BALLOON_DEFLATE);
			unlock_page(page);
			dequeued_page = true;
			break;
		}
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	if (!dequeued_page) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then something
		 * went out of track and some balloon pages are lost.
		 * BUG() here, otherwise the balloon driver may get stuck in
		 * an infinite loop while attempting to release all its pages.
		 * The condition is re-checked under pages_lock so the verdict
		 * cannot race with concurrent enqueue/isolation.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
  101. #ifdef CONFIG_BALLOON_COMPACTION
  102. bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
  103. {
  104. struct balloon_dev_info *b_dev_info = balloon_page_device(page);
  105. unsigned long flags;
  106. spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  107. list_del(&page->lru);
  108. b_dev_info->isolated_pages++;
  109. spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
  110. return true;
  111. }
  112. void balloon_page_putback(struct page *page)
  113. {
  114. struct balloon_dev_info *b_dev_info = balloon_page_device(page);
  115. unsigned long flags;
  116. spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  117. list_add(&page->lru, &b_dev_info->pages);
  118. b_dev_info->isolated_pages--;
  119. spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
  120. }
  121. /* move_to_new_page() counterpart for a ballooned page */
  122. int balloon_page_migrate(struct address_space *mapping,
  123. struct page *newpage, struct page *page,
  124. enum migrate_mode mode)
  125. {
  126. struct balloon_dev_info *balloon = balloon_page_device(page);
  127. VM_BUG_ON_PAGE(!PageLocked(page), page);
  128. VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
  129. return balloon->migratepage(balloon, newpage, page, mode);
  130. }
/*
 * Address-space operations shared by all balloon devices: they let the core
 * page-migration code isolate, migrate and put back balloon pages via the
 * three callbacks defined above.
 */
const struct address_space_operations balloon_aops = {
	.migratepage = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_aops);
#endif /* CONFIG_BALLOON_COMPACTION */