vmwgfx_bo.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "drm/ttm/ttm_object.h"

/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object
 */
struct vmw_user_buffer_object {
        struct ttm_prime_object prime;
        struct vmw_buffer_object vbo;
};

/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_buffer_object, base);
}

/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it to.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
                            struct vmw_buffer_object *buf,
                            struct ttm_placement *placement,
                            bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, placement, &ctx);

        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);

err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                              struct vmw_buffer_object *buf,
                              bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->pin_count > 0) {
                ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
                goto out_unreserve;
        }

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto out_unreserve;

        ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
                       struct vmw_buffer_object *buf,
                       bool interruptible)
{
        return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
                                       interruptible);
}

/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
                                struct vmw_buffer_object *buf,
                                bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement placement;
        struct ttm_place place;
        int ret = 0;
        uint32_t new_flags;

        place = vmw_vram_placement.placement[0];
        place.lpfn = bo->num_pages;
        placement.num_placement = 1;
        placement.placement = &place;
        placement.num_busy_placement = 1;
        placement.busy_placement = &place;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err_unlock;

        /*
         * Is this buffer already in vram but not at the start of it?
         * In that case, evict it first because TTM isn't good at handling
         * that situation.
         */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0 &&
            buf->pin_count == 0) {
                ctx.interruptible = false;
                (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
        }

        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(&placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, &placement, &ctx);

        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err_unlock:
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
                 struct vmw_buffer_object *buf,
                 bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        vmw_bo_pin_reserved(buf, false);

        ttm_bo_unreserve(bo);

err:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
                          SVGAGuestPtr *ptr)
{
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
                ptr->offset = bo->offset;
        } else {
                ptr->gmrId = bo->mem.start;
                ptr->offset = 0;
        }
}

/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
        struct ttm_operation_ctx ctx = { false, true };
        struct ttm_place pl;
        struct ttm_placement placement;
        struct ttm_buffer_object *bo = &vbo->base;
        uint32_t old_mem_type = bo->mem.mem_type;
        int ret;

        lockdep_assert_held(&bo->resv->lock.base);

        if (pin) {
                if (vbo->pin_count++ > 0)
                        return;
        } else {
                WARN_ON(vbo->pin_count <= 0);
                if (--vbo->pin_count > 0)
                        return;
        }

        pl.fpfn = 0;
        pl.lpfn = 0;
        pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
                | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
        if (pin)
                pl.flags |= TTM_PL_FLAG_NO_EVICT;

        memset(&placement, 0, sizeof(placement));
        placement.num_placement = 1;
        placement.placement = &pl;

        ret = ttm_bo_validate(bo, &placement, &ctx);

        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
        struct ttm_buffer_object *bo = &vbo->base;
        bool not_used;
        void *virtual;
        int ret;

        virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
        if (virtual)
                return virtual;

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
        if (ret)
                DRM_ERROR("Buffer object map failed: %d.\n", ret);

        return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}

/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
        if (vbo->map.bo == NULL)
                return;

        ttm_bo_kunmap(&vbo->map);
}

/**
 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
                              bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_buffer_object));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_buffer_object));
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}

/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        vmw_bo_unmap(vmw_bo);
        kfree(vmw_bo);
}

/**
 * vmw_user_bo_destroy - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);

        vmw_bo_unmap(&vmw_user_bo->vbo);
        ttm_prime_object_kfree(vmw_user_bo, prime);
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @bo_free: The buffer object destructor.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
                struct vmw_buffer_object *vmw_bo,
                size_t size, struct ttm_placement *placement,
                bool interruptible,
                void (*bo_free)(struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_bo_destroy);

        WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

        acc_size = vmw_bo_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible, acc_size,
                          NULL, NULL, bo_free);
        return ret;
}

/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        bo = &vmw_user_bo->vbo.base;
        ttm_bo_unref(&bo);
}

/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
                                        enum ttm_ref_type ref_type)
{
        struct vmw_user_buffer_object *user_bo;

        user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->vbo.base);
                break;
        default:
                WARN_ONCE(true, "Undefined buffer object reference release.\n");
        }
}

/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct vmw_private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: If non-NULL, pointer to where a refcounted pointer to the
 * underlying TTM base object should be assigned.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t size,
                      bool shareable,
                      uint32_t *handle,
                      struct vmw_buffer_object **p_vbo,
                      struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(!user_bo)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
                          (dev_priv->has_mob) ?
                          &vmw_sys_placement :
                          &vmw_vram_sys_placement, true,
                          &vmw_user_bo_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->vbo.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_bo_release,
                                    &vmw_user_bo_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_vbo = &user_bo->vbo;
        if (p_base) {
                *p_base = &user_bo->prime.base;
                kref_get(&(*p_base)->refcount);
        }
        *handle = user_bo->prime.base.hash.key;

out_no_base_object:
        return ret;
}

/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                              struct ttm_object_file *tfile)
{
        struct vmw_user_buffer_object *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_bo_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_buffer_object(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
                                    struct ttm_object_file *tfile,
                                    uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->vbo.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;

                lret = reservation_object_wait_timeout_rcu
                        (bo->resv, true, true,
                         nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_synccpu_write_grab
                (bo, !!(flags & drm_vmw_synccpu_dontblock));
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->vbo.base);

        return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
                                       struct ttm_object_file *tfile,
                                       uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}

/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_buffer_object *vbo;
        struct vmw_user_buffer_object *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_base_object *buffer_base;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
                                         &buffer_base);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(vbo, struct vmw_user_buffer_object,
                                       vbo);
                ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_bo_unreference(&vbo);
                ttm_base_object_unref(&buffer_base);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
                                                  arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}

/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
                (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_buffer_object *vbo;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                req->size, false, &handle, &vbo,
                                NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_bo_unreference(&vbo);

out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
                (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, Negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
                       uint32_t handle, struct vmw_buffer_object **out,
                       struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
        if (p_base)
                *p_base = base;
        else
                ttm_base_object_unref(&base);
        *out = &vmw_user_bo->vbo;

        return 0;
}

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, Negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
                          struct vmw_buffer_object *vbo,
                          uint32_t *handle)
{
        struct vmw_user_buffer_object *user_bo;

        if (vbo->base.destroy != vmw_user_bo_destroy)
                return -EINVAL;

        user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

        *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL, false);
}

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                reservation_object_add_excl_fence(bo->resv, &fence->base);
                dma_fence_put(&fence->base);
        } else
                reservation_object_add_excl_fence(bo->resv, &fence->base);
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_buffer_object *vbo;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                args->size, false, &args->handle,
                                &vbo, NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        vmw_bo_unreference(&vbo);
out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_buffer_object *out_buf;
        int ret;

        ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_bo_unreference(&out_buf);
        return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        /* Kill any cached kernel maps before swapout */
        vmw_bo_unmap(vmw_buffer_object(bo));
}

/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                        struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *vbo;

        if (mem == NULL)
                return;

        /* Make sure @bo is embedded in a struct vmw_buffer_object. */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        vbo = container_of(bo, struct vmw_buffer_object, base);

        /*
         * Kill any cached kernel maps before move to or from VRAM.
         * With other types of moves, the underlying pages stay the same,
         * and the map can be kept.
         */
        if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
                vmw_bo_unmap(vbo);

        /*
         * If we're moving a backup MOB out of MOB placement, then make sure we
         * read back all resource content first, and unbind the MOB from
         * the resource.
         */
        if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
                vmw_resource_unbind_list(vbo);
}