msm_gem_shrinker.c

/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gem.h"
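
/*
 * Helper used by the shrinker and vmap-notifier callbacks to take (or detect
 * recursive ownership of) dev->struct_mutex before walking the inactive list.
 * Returns false if the lock could not be taken; otherwise *unlock tells the
 * caller whether it acquired the lock here and must drop it, or whether the
 * lock was already held on this codepath.
 */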
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	/* NOTE: we are *closer* to being able to get rid of
	 * mutex_trylock_recursive().. the msm_gem code itself does
	 * not need struct_mutex, although codepaths that can trigger
	 * the shrinker are still called in code-paths that hold the
	 * struct_mutex.
	 *
	 * Also, msm_obj->madv is protected by struct_mutex.
	 *
	 * The next step is probably to split out a separate lock for
	 * protecting inactive_list, so that the shrinker does not need
	 * struct_mutex.
	 */
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}
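
/*
 * Shrinker ->count_objects() callback: returns the number of pages that
 * could be reclaimed by purging the purgeable objects currently on the
 * inactive list (0 if struct_mutex could not be taken).
 */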
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long count = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return 0;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_purgeable(msm_obj))
			count += msm_obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}
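
/*
 * Shrinker ->scan_objects() callback: purges purgeable objects from the
 * inactive list until at least sc->nr_to_scan pages have been freed or the
 * list is exhausted.  Returns the number of pages reclaimed, or SHRINK_STOP
 * if struct_mutex could not be taken.
 */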
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned long freed = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (freed >= sc->nr_to_scan)
			break;
		if (is_purgeable(msm_obj)) {
			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	if (freed > 0)
		pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);

	return freed;
}
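
/*
 * vmap purge notifier callback, invoked when the kernel is running low on
 * vmalloc space: vunmaps a handful of inactive objects per invocation and
 * accumulates the count of unmapped objects into the unsigned long that
 * ptr points at.
 */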
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_device *dev = priv->dev;
	struct msm_gem_object *msm_obj;
	unsigned unmapped = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return NOTIFY_DONE;

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (is_vunmapable(msm_obj)) {
			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
			/* since we don't know any better, let's bail after a few
			 * and if necessary the shrinker will be invoked again.
			 * Seems better than unmapping *everything*.
			 */
			if (++unmapped >= 15)
				break;
		}
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		pr_info_ratelimited("Purging %u vmaps\n", unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}