/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

struct vmw_shader {
        struct vmw_resource res;
        SVGA3dShaderType type;
        uint32_t size;
};

struct vmw_user_shader {
        struct ttm_base_object base;
        struct vmw_shader shader;
};

static uint64_t vmw_user_shader_size;
static uint64_t vmw_shader_size;

static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_shader_conv = {
        .object_type = VMW_RES_SHADER,
        .base_obj_to_res = vmw_user_shader_base_to_res,
        .res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
        &user_shader_conv;

static const struct vmw_res_func vmw_gb_shader_func = {
        .res_type = vmw_res_shader,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed shaders",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_shader_create,
        .destroy = vmw_gb_shader_destroy,
        .bind = vmw_gb_shader_bind,
        .unbind = vmw_gb_shader_unbind
};

/**
 * Shader management:
 */

static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
        return container_of(res, struct vmw_shader, res);
}

static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
        (void) vmw_gb_shader_destroy(res);
}

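/**
 * vmw_gb_shader_init - Initialize a guest-backed shader resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The shader resource to initialize.
 * @size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within the backing buffer.
 * @type: SVGA3D shader type.
 * @byte_code: Optional buffer object holding the shader byte code.
 * @res_free: Destructor to use when the resource is freed.
 *
 * On failure, the resource is freed using @res_free, or kfree() if
 * @res_free is NULL.
 */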
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
                              struct vmw_resource *res,
                              uint32_t size,
                              uint64_t offset,
                              SVGA3dShaderType type,
                              struct vmw_dma_buffer *byte_code,
                              void (*res_free) (struct vmw_resource *res))
{
        struct vmw_shader *shader = vmw_res_to_shader(res);
        int ret;

        ret = vmw_resource_init(dev_priv, res, true,
                                res_free, &vmw_gb_shader_func);

        if (unlikely(ret != 0)) {
                if (res_free)
                        res_free(res);
                else
                        kfree(res);
                return ret;
        }

        res->backup_size = size;
        if (byte_code) {
                res->backup = vmw_dmabuf_reference(byte_code);
                res->backup_offset = offset;
        }
        shader->size = size;
        shader->type = type;

        vmw_resource_activate(res, vmw_hw_shader_destroy);
        return 0;
}

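/**
 * vmw_gb_shader_create - Issue a DEFINE_GB_SHADER command for the resource.
 *
 * @res: The shader resource.
 *
 * Allocates a device id for the shader and reserves FIFO space for the
 * SVGA_3D_CMD_DEFINE_GB_SHADER command. Returns 0 on success or if the
 * resource already has an id, negative error code on failure.
 */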
static int vmw_gb_shader_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_shader *shader = vmw_res_to_shader(res);
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBShader body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a shader id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.type = shader->type;
        cmd->body.sizeInBytes = shader->size;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv, false);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

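/**
 * vmw_gb_shader_bind - Bind the shader's backing MOB to the device shader.
 *
 * @res: The shader resource.
 * @val_buf: Validation buffer identifying the backing buffer object, which
 * must be MOB-placed.
 */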
static int vmw_gb_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBShader body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.offsetInBytes = res->backup_offset;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

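/**
 * vmw_gb_shader_unbind - Unbind the shader from its backing MOB.
 *
 * @res: The shader resource.
 * @readback: Unused for shaders; there is no device state to read back.
 * @val_buf: Validation buffer identifying the backing buffer object.
 *
 * Binds the shader to SVGA3D_INVALID_ID and fences the backing buffer so
 * it is not reused before the device is done with it.
 */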
static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBShader body;
        } *cmd;
        struct vmw_fence_obj *fence;

        BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.mobid = SVGA3D_INVALID_ID;
        cmd->body.offsetInBytes = 0;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

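/**
 * vmw_gb_shader_destroy - Issue a DESTROY_GB_SHADER command and release
 * the device id.
 *
 * @res: The shader resource.
 *
 * Any context bindings referring to the shader are scrubbed before the
 * device shader is destroyed.
 */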
static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBShader body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        mutex_lock(&dev_priv->binding_mutex);
        vmw_context_binding_res_list_scrub(&res->binding_head);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "destruction.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        mutex_unlock(&dev_priv->binding_mutex);
        vmw_resource_release_id(res);
        vmw_3d_resource_dec(dev_priv, false);

        return 0;
}

/**
 * User-space shader management:
 */

static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_shader, base)->
                 shader.res);
}

static void vmw_user_shader_free(struct vmw_resource *res)
{
        struct vmw_user_shader *ushader =
                container_of(res, struct vmw_user_shader, shader.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(ushader, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_shader_size);
}

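/**
 * vmw_shader_free - Resource destructor for kernel-created shaders.
 *
 * @res: The shader resource.
 */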
static void vmw_shader_free(struct vmw_resource *res)
{
        struct vmw_shader *shader = vmw_res_to_shader(res);
        struct vmw_private *dev_priv = res->dev_priv;

        kfree(shader);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_shader_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_resource *res = vmw_user_shader_base_to_res(base);

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

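/**
 * vmw_shader_destroy_ioctl - Ioctl dropping a user-space reference on a
 * shader object.
 *
 * @dev: The DRM device.
 * @data: Pointer to a struct drm_vmw_shader_arg.
 * @file_priv: The calling file.
 */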
int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->handle,
                                         TTM_REF_USAGE);
}

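/**
 * vmw_user_shader_alloc - Allocate a user-space visible shader and register
 * it as a ttm base object.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Optional buffer object holding the shader byte code.
 * @shader_size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within @buffer.
 * @shader_type: SVGA3D shader type.
 * @tfile: The ttm object file owning the new handle.
 * @handle: If non-NULL, assigned the user-space handle of the new shader.
 */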
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
                                 struct vmw_dma_buffer *buffer,
                                 size_t shader_size,
                                 size_t offset,
                                 SVGA3dShaderType shader_type,
                                 struct ttm_object_file *tfile,
                                 u32 *handle)
{
        struct vmw_user_shader *ushader;
        struct vmw_resource *res, *tmp;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of shaders anyway.
         */
        if (unlikely(vmw_user_shader_size == 0))
                vmw_user_shader_size =
                        ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_shader_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for shader "
                                  "creation.\n");
                goto out;
        }

        ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
        if (unlikely(ushader == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_shader_size);
                ret = -ENOMEM;
                goto out;
        }

        res = &ushader->shader.res;
        ushader->base.shareable = false;
        ushader->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */
        ret = vmw_gb_shader_init(dev_priv, res, shader_size,
                                 offset, shader_type, buffer,
                                 vmw_user_shader_free);
        if (unlikely(ret != 0))
                goto out;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &ushader->base, false,
                                   VMW_RES_SHADER,
                                   &vmw_user_shader_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        if (handle)
                *handle = ushader->base.hash.key;
out_err:
        vmw_resource_unreference(&res);
out:
        return ret;
}

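/**
 * vmw_shader_alloc - Allocate a shader resource for in-kernel use.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Optional buffer object holding the shader byte code.
 * @shader_size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within @buffer.
 * @shader_type: SVGA3D shader type.
 *
 * Returns a pointer to the new resource on success, an error pointer
 * otherwise.
 */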
struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
                                      struct vmw_dma_buffer *buffer,
                                      size_t shader_size,
                                      size_t offset,
                                      SVGA3dShaderType shader_type)
{
        struct vmw_shader *shader;
        struct vmw_resource *res;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of shaders anyway.
         */
        if (unlikely(vmw_shader_size == 0))
                vmw_shader_size =
                        ttm_round_pot(sizeof(struct vmw_shader)) + 128;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_shader_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for shader "
                                  "creation.\n");
                goto out_err;
        }

        shader = kzalloc(sizeof(*shader), GFP_KERNEL);
        if (unlikely(shader == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_shader_size);
                ret = -ENOMEM;
                goto out_err;
        }

        res = &shader->res;

        /*
         * From here on, the destructor takes over resource freeing.
         */
        ret = vmw_gb_shader_init(dev_priv, res, shader_size,
                                 offset, shader_type, buffer,
                                 vmw_shader_free);

out_err:
        return ret ? ERR_PTR(ret) : res;
}

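/**
 * vmw_shader_define_ioctl - Ioctl creating a user-space shader from byte
 * code held in a dma buffer.
 *
 * @dev: The DRM device.
 * @data: Pointer to a struct drm_vmw_shader_create_arg.
 * @file_priv: The calling file.
 */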
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_shader_create_arg *arg =
                (struct drm_vmw_shader_create_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *buffer = NULL;
        SVGA3dShaderType shader_type;
        int ret;

        if (arg->buffer_handle != SVGA3D_INVALID_ID) {
                ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
                                             &buffer);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not find buffer for shader "
                                  "creation.\n");
                        return ret;
                }

                if ((u64)buffer->base.num_pages * PAGE_SIZE <
                    (u64)arg->size + (u64)arg->offset) {
                        DRM_ERROR("Illegal buffer- or shader size.\n");
                        ret = -EINVAL;
                        goto out_bad_arg;
                }
        }

        switch (arg->shader_type) {
        case drm_vmw_shader_type_vs:
                shader_type = SVGA3D_SHADERTYPE_VS;
                break;
        case drm_vmw_shader_type_ps:
                shader_type = SVGA3D_SHADERTYPE_PS;
                break;
        case drm_vmw_shader_type_gs:
                shader_type = SVGA3D_SHADERTYPE_GS;
                break;
        default:
                DRM_ERROR("Illegal shader type.\n");
                ret = -EINVAL;
                goto out_bad_arg;
        }

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                goto out_bad_arg;

        ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
                                    shader_type, tfile, &arg->shader_handle);

        ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
        vmw_dmabuf_unreference(&buffer);
        return ret;
}

/**
 * vmw_compat_shader_id_ok - Check whether a compat shader user key and
 * shader type are within valid bounds.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns true if valid, false if not.
 */
static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
        return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}

/**
 * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns a hash key suitable for a command buffer managed resource
 * manager hash table.
 */
static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
        return user_key | (shader_type << 20);
}

/**
 * vmw_compat_shader_remove - Stage a compat shader for removal.
 *
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @shader_type: Shader type.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
                             u32 user_key, SVGA3dShaderType shader_type,
                             struct list_head *list)
{
        if (!vmw_compat_shader_id_ok(user_key, shader_type))
                return -EINVAL;

        return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
                                     vmw_compat_shader_key(user_key,
                                                           shader_type),
                                     list);
}

/**
 * vmw_compat_shader_add - Create a compat shader and stage it for addition
 * as a command buffer managed resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @bytecode: Pointer to the bytecode of the shader.
 * @shader_type: Shader type.
 * @size: Size of the shader bytecode in bytes.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_compat_shader_add(struct vmw_private *dev_priv,
                          struct vmw_cmdbuf_res_manager *man,
                          u32 user_key, const void *bytecode,
                          SVGA3dShaderType shader_type,
                          size_t size,
                          struct list_head *list)
{
        struct vmw_dma_buffer *buf;
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        int ret;
        struct vmw_resource *res;

        if (!vmw_compat_shader_id_ok(user_key, shader_type))
                return -EINVAL;

        /* Allocate and pin a DMA buffer */
        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (unlikely(buf == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
                              true, vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out;

        ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
        if (unlikely(ret != 0))
                goto no_reserve;

        /* Map and copy shader bytecode. */
        ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
                          &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(&buf->base);
                goto no_reserve;
        }

        memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
        WARN_ON(is_iomem);

        ttm_bo_kunmap(&map);
        ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
        WARN_ON(ret != 0);
        ttm_bo_unreserve(&buf->base);

        res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto no_reserve;
        }

        ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
                                 vmw_compat_shader_key(user_key, shader_type),
                                 res, list);
        vmw_resource_unreference(&res);
no_reserve:
        vmw_dmabuf_unreference(&buf);
out:
        return ret;
}

/**
 * vmw_compat_shader_lookup - Look up a compat shader
 *
 * @man: Pointer to the command buffer managed resource manager identifying
 * the shader namespace.
 * @user_key: The user space id of the shader.
 * @shader_type: The shader type.
 *
 * Returns a refcounted pointer to a struct vmw_resource if the shader was
 * found. An error pointer otherwise.
 */
struct vmw_resource *
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
                         u32 user_key,
                         SVGA3dShaderType shader_type)
{
        if (!vmw_compat_shader_id_ok(user_key, shader_type))
                return ERR_PTR(-EINVAL);

        return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
                                     vmw_compat_shader_key(user_key,
                                                           shader_type));
}