/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
#include <drm/drm_print.h>

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
/**
 * enum drm_mm_insert_mode - control search and allocation behaviour
 *
 * The &struct drm_mm range manager supports finding a suitable hole using
 * a number of search trees. These trees are organised by size, by address and
 * in most recent eviction order. This allows the user to find either the
 * smallest hole to reuse, the lowest or highest address to reuse, or simply
 * reuse the most recent eviction that fits. When allocating the &drm_mm_node
 * from within the hole, the &drm_mm_insert_mode also dictates whether to
 * allocate the lowest matching address or the highest.
 */
enum drm_mm_insert_mode {
	/**
	 * @DRM_MM_INSERT_BEST:
	 *
	 * Search for the smallest hole (within the search range) that fits
	 * the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_BEST = 0,

	/**
	 * @DRM_MM_INSERT_LOW:
	 *
	 * Search for the lowest hole (address closest to 0, within the search
	 * range) that fits the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_LOW,

	/**
	 * @DRM_MM_INSERT_HIGH:
	 *
	 * Search for the highest hole (address closest to U64_MAX, within the
	 * search range) that fits the desired node.
	 *
	 * Allocates the node from the *top* of the found hole. The specified
	 * alignment for the node is applied to the base of the node
	 * (&drm_mm_node.start).
	 */
	DRM_MM_INSERT_HIGH,

	/**
	 * @DRM_MM_INSERT_EVICT:
	 *
	 * Search for the most recently evicted hole (within the search range)
	 * that fits the desired node. This is appropriate for use immediately
	 * after performing an eviction scan (see drm_mm_scan_init()) and
	 * removing the selected nodes to form a hole.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_EVICT,
	/**
	 * @DRM_MM_INSERT_ONCE:
	 *
	 * Only check the first hole for suitability and report -ENOSPC
	 * immediately otherwise, rather than check every hole until a
	 * suitable one is found. Can only be used in conjunction with another
	 * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
	 */
	DRM_MM_INSERT_ONCE = BIT(31),
	/**
	 * @DRM_MM_INSERT_HIGHEST:
	 *
	 * Only check the highest hole (the hole with the largest address) and
	 * insert the node at the top of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,

	/**
	 * @DRM_MM_INSERT_LOWEST:
	 *
	 * Only check the lowest hole (the hole with the smallest address) and
	 * insert the node at the bottom of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
};
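
/*
 * For instance, a driver that wants scratch buffers packed away from the
 * bottom of its address space could request top-down placement. This is an
 * illustrative sketch only; mm, node and size come from the hypothetical
 * caller:
 *
 *	err = drm_mm_insert_node_in_range(mm, node, size, 0, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_HIGH);
 */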
/**
 * struct drm_mm_node - allocated block in the DRM allocator
 *
 * This represents an allocated block in a &drm_mm allocator. Except for
 * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
 * entirely opaque and should only be accessed through the provided functions.
 * Since allocation of these nodes is entirely handled by the driver they can be
 * embedded.
 */
struct drm_mm_node {
	/** @color: Opaque driver-private tag. */
	unsigned long color;
	/** @start: Start address of the allocated block. */
	u64 start;
	/** @size: Size of the allocated block. */
	u64 size;
	/* private: */
	struct drm_mm *mm;
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	struct rb_node rb_hole_size;
	struct rb_node rb_hole_addr;
	u64 __subtree_last;
	u64 hole_size;
	bool allocated : 1;
	bool scanned_block : 1;
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};
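
/*
 * Since the node is driver-allocated, it is usually embedded in a larger
 * driver object, e.g. (a hypothetical structure, shown only as a sketch):
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// must be zeroed before insertion
 *		void *vaddr;
 *	};
 *
 * and recovered from iterators with container_of(node, struct my_buffer, node).
 */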
/**
 * struct drm_mm - DRM allocator
 *
 * DRM range allocator with a few special functions and features geared towards
 * managing GPU memory. Except for the @color_adjust callback the structure is
 * entirely opaque and should only be accessed through the provided functions
 * and macros. This structure can be embedded into larger driver structures.
 */
struct drm_mm {
	/**
	 * @color_adjust:
	 *
	 * Optional driver callback to further apply restrictions on a hole.
	 * The node argument points at the node containing the hole from which
	 * the block would be allocated (see drm_mm_hole_follows() and
	 * friends). The other arguments are the opaque color tag of the block
	 * to be allocated and pointers to the start and end of the hole,
	 * which the driver can adjust as needed to e.g. insert guard pages.
	 */
	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);
	/* private: */
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root_cached interval_tree;
	struct rb_root_cached holes_size;
	struct rb_root holes_addr;

	unsigned long scan_active;
};
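
/*
 * Sketch of a @color_adjust implementation for a driver that must keep a
 * guard page between blocks carrying different tags. All names besides the
 * drm_mm helpers are hypothetical:
 *
 *	static void my_color_adjust(const struct drm_mm_node *node,
 *				    unsigned long color,
 *				    u64 *start, u64 *end)
 *	{
 *		// @node precedes the hole; push the hole start up when the
 *		// neighbouring tag differs from the one being allocated
 *		if (drm_mm_node_allocated(node) && node->color != color)
 *			*start += PAGE_SIZE;
 *	}
 */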
/**
 * struct drm_mm_scan - DRM allocator eviction roster data
 *
 * This structure tracks data needed for the eviction roster set up using
 * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
 * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
 * be accessed through the provided functions and macros. It is meant to be
 * allocated temporarily by the driver on the stack.
 */
struct drm_mm_scan {
	/* private: */
	struct drm_mm *mm;

	u64 size;
	u64 alignment;
	u64 remainder_mask;

	u64 range_start;
	u64 range_end;

	u64 hit_start;
	u64 hit_end;

	unsigned long color;
	enum drm_mm_insert_mode mode;
};
/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return node->allocated;
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return mm->hole_stack.next;
}
/**
 * drm_mm_hole_follows - checks whether a hole follows this node
 * @node: drm_mm_node to check
 *
 * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
 * If you wish to know whether a hole follows this particular node,
 * query this function. See also drm_mm_hole_node_start() and
 * drm_mm_hole_node_end().
 *
 * Returns:
 * True if a hole follows the @node.
 */
static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
{
	return node->hole_size;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}
/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
	return __drm_mm_hole_node_start(hole_node);
}
static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}
/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)
/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @next: &struct drm_mm_node to store the next step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe(), so safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
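
/*
 * A typical use of the safe variant is tearing down all remaining nodes
 * before drm_mm_takedown(). Sketch only; struct my_buffer and
 * my_buffer_free() are hypothetical:
 *
 *	struct drm_mm_node *node, *next;
 *
 *	drm_mm_for_each_node_safe(node, next, mm) {
 *		drm_mm_remove_node(node);
 *		my_buffer_free(container_of(node, struct my_buffer, node));
 *	}
 */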
/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @pos: &drm_mm_node used internally to track progress
 * @mm: &drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so not safe against removal of elements. @pos is used
 * internally and will not reflect a real drm_mm_node for the very first hole.
 * Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 */
#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
	for (pos = list_first_entry(&(mm)->hole_stack, \
				    typeof(*pos), hole_stack); \
	     &pos->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(pos), \
	     hole_end = hole_start + pos->hole_size, \
	     1 : 0; \
	     pos = list_next_entry(pos, hole_stack))
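
/*
 * Example sketch of a driver-specific dump of all free space (pr_info() is
 * just one way to report it):
 *
 *	struct drm_mm_node *pos;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_hole(pos, mm, hole_start, hole_end)
 *		pr_info("hole: 0x%llx-0x%llx (%llu bytes)\n",
 *			hole_start, hole_end, hole_end - hole_start);
 */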
/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
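
/*
 * drm_mm_reserve_node() inserts a node whose start, size (and, if used,
 * color) were already filled in by the caller, e.g. to carve out a range
 * claimed by firmware. A sketch, with a made-up range:
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *
 *	if (!node)
 *		return -ENOMEM;
 *	node->start = 0;
 *	node->size = SZ_1M;	// hypothetical reserved range
 *	err = drm_mm_reserve_node(mm, node);	// fails if the range is not free
 */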
int drm_mm_insert_node_in_range(struct drm_mm *mm,
				struct drm_mm_node *node,
				u64 size,
				u64 alignment,
				unsigned long color,
				u64 start,
				u64 end,
				enum drm_mm_insert_mode mode);
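
/*
 * Sketch of a ranged allocation: place a 64 KiB block, 4 KiB aligned, within
 * the first 256 MiB of the managed space, preferring the lowest address (the
 * sizes are purely illustrative):
 *
 *	err = drm_mm_insert_node_in_range(mm, node, SZ_64K, SZ_4K, 0,
 *					  0, SZ_256M, DRM_MM_INSERT_LOW);
 *	if (err)	// -ENOSPC if no suitable hole was found
 *		return err;
 */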
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_insert_node_in_range() with no
 * range restrictions applied.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			   u64 size, u64 alignment,
			   unsigned long color,
			   enum drm_mm_insert_mode mode)
{
	return drm_mm_insert_node_in_range(mm, node,
					   size, alignment, color,
					   0, U64_MAX, mode);
}
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size)
{
	return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
}
void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);
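
/*
 * Typical lifecycle, with the manager embedded in a hypothetical device
 * structure:
 *
 *	drm_mm_init(&priv->vram_mm, 0, vram_size);	// manage [0, vram_size)
 *	...
 *	// every node must have been removed before takedown
 *	drm_mm_takedown(&priv->vram_mm);
 */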
/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
static inline bool drm_mm_clean(const struct drm_mm *mm)
{
	return list_empty(drm_mm_nodes(mm));
}
struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start and @end. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so not safe against removal of elements. It assumes
 * that @end is within (or is the upper limit of) the drm_mm allocator.
 * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
 * over the special _unallocated_ &drm_mm.head_node, and may even continue
 * indefinitely.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__->start < (end__);					\
	     node__ = list_next_entry(node__, node_list))
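
/*
 * Example sketch: fail a reservation if anything already overlaps
 * [start, end); the loop body only runs when an overlapping node exists:
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node_in_range(node, mm, start, end)
 *		return -EBUSY;
 */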
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size, u64 alignment, unsigned long color,
				 u64 start, u64 end,
				 enum drm_mm_insert_mode mode);

/**
 * drm_mm_scan_init - initialize lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_scan_init_with_range() with no range
 * restrictions applied.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
				    struct drm_mm *mm,
				    u64 size,
				    u64 alignment,
				    unsigned long color,
				    enum drm_mm_insert_mode mode)
{
	drm_mm_scan_init_with_range(scan, mm,
				    size, alignment, color,
				    0, U64_MAX, mode);
}
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node);
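
/*
 * The scan helpers implement the eviction roster: candidates are added until
 * a large-enough virtual hole is found, then every candidate must be removed
 * again (prepending to a local list, as below, yields removal in reverse
 * order of addition). A hedged sketch, with a hypothetical struct my_obj
 * embedding a drm_mm_node plus lru/evict list links:
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *next;
 *	bool found = false;
 *	LIST_HEAD(evict);
 *
 *	drm_mm_scan_init(&scan, mm, size, align, 0, DRM_MM_INSERT_LOW);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *	list_for_each_entry_safe(obj, next, &evict, evict_link) {
 *		// false means the block is not part of the hole: keep it
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);
 *	}
 *	if (!found)
 *		return -ENOSPC;
 *	// unbind the objects left on @evict, then allocate the new node
 *	// with DRM_MM_INSERT_EVICT
 */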
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);

void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);

#endif