page-states.c (2.3 KB)
  1. /*
  2. * Copyright IBM Corp. 2008
  3. *
  4. * Guest page hinting for unused pages.
  5. *
  6. * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/errno.h>
  10. #include <linux/types.h>
  11. #include <linux/mm.h>
  12. #include <linux/gfp.h>
  13. #include <linux/init.h>
/* ESSA operation codes: set a page frame's usage state. */
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2

/*
 * CMMA enable flag: on by default, can be overridden with the "cmma="
 * kernel parameter and is cleared by cmma_init() when the ESSA
 * instruction is not available.
 */
static int cmma_flag = 1;
  17. static int __init cmma(char *str)
  18. {
  19. char *parm;
  20. parm = strstrip(str);
  21. if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
  22. cmma_flag = 1;
  23. return 1;
  24. }
  25. cmma_flag = 0;
  26. if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
  27. return 1;
  28. return 0;
  29. }
  30. __setup("cmma=", cmma);
/*
 * cmma_init - probe whether the ESSA instruction is available.
 *
 * rc is preloaded with -EOPNOTSUPP and only cleared by the "la %0,0"
 * that follows the probe ESSA.  If the probe raises a program check,
 * the EX_TABLE fixup resumes at label 1, skipping the clear, so rc
 * stays non-zero and CMMA is disabled.
 * NOTE(review): the fixup entry is keyed on label 0 (the instruction
 * after the ESSA) — presumably because the s390 program-check PSW
 * points past the faulting instruction; confirm against the EX_TABLE
 * semantics for this kernel version.
 */
void __init cmma_init(void)
{
	/* Registers are pinned (r0/r1) as required by the probe sequence. */
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	if (!cmma_flag)
		return;
	asm volatile(
		"	.insn rrf,0xb9ab0000,%1,%1,0,0\n"	/* probe ESSA */
		"0:	la	%0,0\n"				/* success: rc = 0 */
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	if (rc)
		cmma_flag = 0;	/* facility absent: disable CMMA */
}
/*
 * Mark the 2^order page frames starting at @page as "unused" via the
 * ESSA instruction, hinting to the hypervisor that their contents need
 * not be preserved.  The previous state returned in rc is ignored.
 */
static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}
  55. void arch_free_page(struct page *page, int order)
  56. {
  57. if (!cmma_flag)
  58. return;
  59. set_page_unstable(page, order);
  60. }
/*
 * Mark the 2^order page frames starting at @page as "stable" via the
 * ESSA instruction, telling the hypervisor their contents must be
 * preserved.  The previous state returned in rc is ignored.
 */
static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}
  70. void arch_alloc_page(struct page *page, int order)
  71. {
  72. if (!cmma_flag)
  73. return;
  74. set_page_stable(page, order);
  75. }
  76. void arch_set_page_states(int make_stable)
  77. {
  78. unsigned long flags, order, t;
  79. struct list_head *l;
  80. struct page *page;
  81. struct zone *zone;
  82. if (!cmma_flag)
  83. return;
  84. if (make_stable)
  85. drain_local_pages(NULL);
  86. for_each_populated_zone(zone) {
  87. spin_lock_irqsave(&zone->lock, flags);
  88. for_each_migratetype_order(order, t) {
  89. list_for_each(l, &zone->free_area[order].free_list[t]) {
  90. page = list_entry(l, struct page, lru);
  91. if (make_stable)
  92. set_page_stable(page, order);
  93. else
  94. set_page_unstable(page, order);
  95. }
  96. }
  97. spin_unlock_irqrestore(&zone->lock, flags);
  98. }
  99. }