/* amdgpu_gtt_mgr.c — GTT (GART) address-space manager for the amdgpu driver */
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
  24. #include <drm/drmP.h>
  25. #include "amdgpu.h"
  26. struct amdgpu_gtt_mgr {
  27. struct drm_mm mm;
  28. spinlock_t lock;
  29. atomic64_t available;
  30. };
  31. struct amdgpu_gtt_node {
  32. struct drm_mm_node node;
  33. struct ttm_buffer_object *tbo;
  34. };
  35. /**
  36. * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
  37. *
  38. * @man: TTM memory type manager
  39. * @p_size: maximum size of GTT
  40. *
  41. * Allocate and initialize the GTT manager.
  42. */
  43. static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
  44. unsigned long p_size)
  45. {
  46. struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
  47. struct amdgpu_gtt_mgr *mgr;
  48. uint64_t start, size;
  49. mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
  50. if (!mgr)
  51. return -ENOMEM;
  52. start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
  53. size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
  54. drm_mm_init(&mgr->mm, start, size);
  55. spin_lock_init(&mgr->lock);
  56. atomic64_set(&mgr->available, p_size);
  57. man->priv = mgr;
  58. return 0;
  59. }
  60. /**
  61. * amdgpu_gtt_mgr_fini - free and destroy GTT manager
  62. *
  63. * @man: TTM memory type manager
  64. *
  65. * Destroy and free the GTT manager, returns -EBUSY if ranges are still
  66. * allocated inside it.
  67. */
  68. static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
  69. {
  70. struct amdgpu_gtt_mgr *mgr = man->priv;
  71. spin_lock(&mgr->lock);
  72. drm_mm_takedown(&mgr->mm);
  73. spin_unlock(&mgr->lock);
  74. kfree(mgr);
  75. man->priv = NULL;
  76. return 0;
  77. }
  78. /**
  79. * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
  80. *
  81. * @mem: the mem object to check
  82. *
  83. * Check if a mem object has already address space allocated.
  84. */
  85. bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
  86. {
  87. struct amdgpu_gtt_node *node = mem->mm_node;
  88. return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
  89. }
  90. /**
  91. * amdgpu_gtt_mgr_alloc - allocate new ranges
  92. *
  93. * @man: TTM memory type manager
  94. * @tbo: TTM BO we need this range for
  95. * @place: placement flags and restrictions
  96. * @mem: the resulting mem object
  97. *
  98. * Allocate the address space for a node.
  99. */
  100. static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
  101. struct ttm_buffer_object *tbo,
  102. const struct ttm_place *place,
  103. struct ttm_mem_reg *mem)
  104. {
  105. struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
  106. struct amdgpu_gtt_mgr *mgr = man->priv;
  107. struct amdgpu_gtt_node *node = mem->mm_node;
  108. enum drm_mm_insert_mode mode;
  109. unsigned long fpfn, lpfn;
  110. int r;
  111. if (amdgpu_gtt_mgr_has_gart_addr(mem))
  112. return 0;
  113. if (place)
  114. fpfn = place->fpfn;
  115. else
  116. fpfn = 0;
  117. if (place && place->lpfn)
  118. lpfn = place->lpfn;
  119. else
  120. lpfn = adev->gart.num_cpu_pages;
  121. mode = DRM_MM_INSERT_BEST;
  122. if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
  123. mode = DRM_MM_INSERT_HIGH;
  124. spin_lock(&mgr->lock);
  125. r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
  126. mem->page_alignment, 0, fpfn, lpfn,
  127. mode);
  128. spin_unlock(&mgr->lock);
  129. if (!r)
  130. mem->start = node->node.start;
  131. return r;
  132. }
  133. /**
  134. * amdgpu_gtt_mgr_new - allocate a new node
  135. *
  136. * @man: TTM memory type manager
  137. * @tbo: TTM BO we need this range for
  138. * @place: placement flags and restrictions
  139. * @mem: the resulting mem object
  140. *
  141. * Dummy, allocate the node but no space for it yet.
  142. */
  143. static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
  144. struct ttm_buffer_object *tbo,
  145. const struct ttm_place *place,
  146. struct ttm_mem_reg *mem)
  147. {
  148. struct amdgpu_gtt_mgr *mgr = man->priv;
  149. struct amdgpu_gtt_node *node;
  150. int r;
  151. spin_lock(&mgr->lock);
  152. if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
  153. atomic64_read(&mgr->available) < mem->num_pages) {
  154. spin_unlock(&mgr->lock);
  155. return 0;
  156. }
  157. atomic64_sub(mem->num_pages, &mgr->available);
  158. spin_unlock(&mgr->lock);
  159. node = kzalloc(sizeof(*node), GFP_KERNEL);
  160. if (!node) {
  161. r = -ENOMEM;
  162. goto err_out;
  163. }
  164. node->node.start = AMDGPU_BO_INVALID_OFFSET;
  165. node->node.size = mem->num_pages;
  166. node->tbo = tbo;
  167. mem->mm_node = node;
  168. if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
  169. r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
  170. if (unlikely(r)) {
  171. kfree(node);
  172. mem->mm_node = NULL;
  173. r = 0;
  174. goto err_out;
  175. }
  176. } else {
  177. mem->start = node->node.start;
  178. }
  179. return 0;
  180. err_out:
  181. atomic64_add(mem->num_pages, &mgr->available);
  182. return r;
  183. }
  184. /**
  185. * amdgpu_gtt_mgr_del - free ranges
  186. *
  187. * @man: TTM memory type manager
  188. * @tbo: TTM BO we need this range for
  189. * @place: placement flags and restrictions
  190. * @mem: TTM memory object
  191. *
  192. * Free the allocated GTT again.
  193. */
  194. static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
  195. struct ttm_mem_reg *mem)
  196. {
  197. struct amdgpu_gtt_mgr *mgr = man->priv;
  198. struct amdgpu_gtt_node *node = mem->mm_node;
  199. if (!node)
  200. return;
  201. spin_lock(&mgr->lock);
  202. if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
  203. drm_mm_remove_node(&node->node);
  204. spin_unlock(&mgr->lock);
  205. atomic64_add(mem->num_pages, &mgr->available);
  206. kfree(node);
  207. mem->mm_node = NULL;
  208. }
  209. /**
  210. * amdgpu_gtt_mgr_usage - return usage of GTT domain
  211. *
  212. * @man: TTM memory type manager
  213. *
  214. * Return how many bytes are used in the GTT domain
  215. */
  216. uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
  217. {
  218. struct amdgpu_gtt_mgr *mgr = man->priv;
  219. s64 result = man->size - atomic64_read(&mgr->available);
  220. return (result > 0 ? result : 0) * PAGE_SIZE;
  221. }
  222. int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
  223. {
  224. struct amdgpu_gtt_mgr *mgr = man->priv;
  225. struct amdgpu_gtt_node *node;
  226. struct drm_mm_node *mm_node;
  227. int r = 0;
  228. spin_lock(&mgr->lock);
  229. drm_mm_for_each_node(mm_node, &mgr->mm) {
  230. node = container_of(mm_node, struct amdgpu_gtt_node, node);
  231. r = amdgpu_ttm_recover_gart(node->tbo);
  232. if (r)
  233. break;
  234. }
  235. spin_unlock(&mgr->lock);
  236. return r;
  237. }
  238. /**
  239. * amdgpu_gtt_mgr_debug - dump VRAM table
  240. *
  241. * @man: TTM memory type manager
  242. * @printer: DRM printer to use
  243. *
  244. * Dump the table content using printk.
  245. */
  246. static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
  247. struct drm_printer *printer)
  248. {
  249. struct amdgpu_gtt_mgr *mgr = man->priv;
  250. spin_lock(&mgr->lock);
  251. drm_mm_print(&mgr->mm, printer);
  252. spin_unlock(&mgr->lock);
  253. drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
  254. man->size, (u64)atomic64_read(&mgr->available),
  255. amdgpu_gtt_mgr_usage(man) >> 20);
  256. }
  257. const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
  258. .init = amdgpu_gtt_mgr_init,
  259. .takedown = amdgpu_gtt_mgr_fini,
  260. .get_node = amdgpu_gtt_mgr_new,
  261. .put_node = amdgpu_gtt_mgr_del,
  262. .debug = amdgpu_gtt_mgr_debug
  263. };