vmwgfx_context.c

/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state cbs;
        struct vmw_cmdbuf_res_manager *man;
};

typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
                                           bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};

static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
        [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
        [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
        [vmw_ctx_binding_tex] = vmw_context_scrub_texture };

/**
 * Context management:
 */
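
/*
 * Editor's note (descriptive comment, not part of the upstream file):
 * vmw_hw_context_destroy() below handles both context flavors. Guest-backed
 * contexts take the vmw_gb_context_destroy() path, after tearing down the
 * command-buffer resource manager and the tracked binding state. Legacy
 * contexts are destroyed with a SVGA_3D_CMD_CONTEXT_DESTROY FIFO command.
 */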
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;

        if (res->func->destroy == vmw_gb_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
                (void) vmw_context_binding_state_kill(&uctx->cbs);
                (void) vmw_gb_context_destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_3d_resource_dec(dev_priv, false);
}
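
/*
 * Editor's note (descriptive comment, not part of the upstream file):
 * Context initialization. vmw_gb_context_init() sets up a guest-backed
 * context resource, including its command-buffer resource manager when MOBs
 * are available. vmw_context_init() either forwards to it or defines a
 * legacy context directly through the FIFO.
 */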
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               struct vmw_resource *res,
                               void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        ret = vmw_resource_init(dev_priv, res, true,
                                res_free, &vmw_gb_context_func);
        res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
        if (unlikely(ret != 0))
                goto out_err;

        if (dev_priv->has_mob) {
                uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
                if (unlikely(IS_ERR(uctx->man))) {
                        ret = PTR_ERR(uctx->man);
                        uctx->man = NULL;
                        goto out_err;
                }
        }

        memset(&uctx->cbs, 0, sizeof(uctx->cbs));
        INIT_LIST_HEAD(&uctx->cbs.list);

        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_err:
        if (res_free)
                res_free(res);
        else
                kfree(res);
        return ret;
}

static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv, false);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
        struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
        int ret;

        if (unlikely(res == NULL))
                return NULL;

        ret = vmw_context_init(dev_priv, res, NULL);

        return (ret == 0) ? res : NULL;
}
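
/*
 * Editor's note (descriptive comment, not part of the upstream file):
 * Guest-backed context resource callbacks (create/bind/unbind/destroy),
 * wired up through vmw_gb_context_func above. Each callback reserves FIFO
 * space, emits the corresponding SVGA_3D_CMD_*_GB_CONTEXT command and
 * commits it.
 */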
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv, false);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_context_binding_state_scrub(&uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */
        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_3d_resource_dec(dev_priv, false);

        return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}

static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(ctx, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
                container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}
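
/*
 * Editor's note (descriptive comment, not part of the upstream file):
 * DRM ioctl entry points. vmw_context_destroy_ioctl() drops the user-space
 * reference on a context object. vmw_context_define_ioctl() accounts for the
 * allocation, initializes the context resource and publishes it as a TTM
 * base object whose handle is returned in the ioctl argument.
 */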
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of contexts anyway.
         */
        if (unlikely(vmw_user_context_size == 0))
                vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
                        ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_context_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for context"
                                  " creation.\n");
                goto out_unlock;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(ctx == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_context_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */
        ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.hash.key;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}
/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetShader body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_SET_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = bi->i1.shader_type;
        cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
                                           bool rebind)
{
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for render target "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = bi->i1.rt_type;
        cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        cmd->body.target.face = 0;
        cmd->body.target.mipmap = 0;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
                                     bool rebind)
{
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                struct {
                        SVGA3dCmdSetTextureState c;
                        SVGA3dTextureState s1;
                } body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for texture "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.c.cid = bi->ctx->id;
        cmd->body.s1.stage = bi->i1.texture_stage;
        cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
        cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_context_binding_drop: Stop tracking a context binding
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
        list_del(&cb->ctx_list);
        if (!list_empty(&cb->res_list))
                list_del(&cb->res_list);
        cb->bi.ctx = NULL;
}
/**
 * vmw_context_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
                            const struct vmw_ctx_bindinfo *bi)
{
        struct vmw_ctx_binding *loc;

        switch (bi->bt) {
        case vmw_ctx_binding_rt:
                if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
                        DRM_ERROR("Illegal render target type %u.\n",
                                  (unsigned) bi->i1.rt_type);
                        return -EINVAL;
                }
                loc = &cbs->render_targets[bi->i1.rt_type];
                break;
        case vmw_ctx_binding_tex:
                if (unlikely((unsigned)bi->i1.texture_stage >=
                             SVGA3D_NUM_TEXTURE_UNITS)) {
                        DRM_ERROR("Illegal texture/sampler unit %u.\n",
                                  (unsigned) bi->i1.texture_stage);
                        return -EINVAL;
                }
                loc = &cbs->texture_units[bi->i1.texture_stage];
                break;
        case vmw_ctx_binding_shader:
                if (unlikely((unsigned)bi->i1.shader_type >=
                             SVGA3D_SHADERTYPE_MAX)) {
                        DRM_ERROR("Illegal shader type %u.\n",
                                  (unsigned) bi->i1.shader_type);
                        return -EINVAL;
                }
                loc = &cbs->shaders[bi->i1.shader_type];
                break;
        default:
                BUG();
        }

        if (loc->bi.ctx != NULL)
                vmw_context_binding_drop(loc);

        loc->bi = *bi;
        loc->bi.scrubbed = false;
        list_add_tail(&loc->ctx_list, &cbs->list);
        INIT_LIST_HEAD(&loc->res_list);

        return 0;
}

/**
 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @bi: Information about the binding to track.
 */
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
                                         const struct vmw_ctx_bindinfo *bi)
{
        struct vmw_ctx_binding *loc;

        switch (bi->bt) {
        case vmw_ctx_binding_rt:
                loc = &cbs->render_targets[bi->i1.rt_type];
                break;
        case vmw_ctx_binding_tex:
                loc = &cbs->texture_units[bi->i1.texture_stage];
                break;
        case vmw_ctx_binding_shader:
                loc = &cbs->shaders[bi->i1.shader_type];
                break;
        default:
                BUG();
        }

        if (loc->bi.ctx != NULL)
                vmw_context_binding_drop(loc);

        if (bi->res != NULL) {
                loc->bi = *bi;
                list_add_tail(&loc->ctx_list, &cbs->list);
                list_add_tail(&loc->res_list, &bi->res->binding_head);
        }
}
/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
        if (!cb->bi.scrubbed) {
                (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
                cb->bi.scrubbed = true;
        }
        vmw_context_binding_drop(cb);
}

/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_binding *entry, *next;

        list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
                vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_binding *entry;

        list_for_each_entry(entry, &cbs->list, ctx_list) {
                if (!entry->bi.scrubbed) {
                        (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
                        entry->bi.scrubbed = true;
                }
        }
}

/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
        struct vmw_ctx_binding *entry, *next;

        list_for_each_entry_safe(entry, next, head, res_list)
                vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrub all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
        struct vmw_ctx_binding *entry;

        list_for_each_entry(entry, head, res_list) {
                if (!entry->bi.scrubbed) {
                        (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
                        entry->bi.scrubbed = true;
                }
        }
}
/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
 * @ctx: Pointer to context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once commands have been
 * submitted.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
                                        struct vmw_ctx_binding_state *from)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        struct vmw_ctx_binding *entry, *next;

        list_for_each_entry_safe(entry, next, &from->list, ctx_list)
                vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}
/**
 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @ctx: The context resource
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
        struct vmw_ctx_binding *entry;
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        struct vmw_ctx_binding_state *cbs = &uctx->cbs;
        int ret;

        list_for_each_entry(entry, &cbs->list, ctx_list) {
                if (likely(!entry->bi.scrubbed))
                        continue;

                if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
                            SVGA3D_INVALID_ID))
                        continue;

                ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
                if (unlikely(ret != 0))
                        return ret;

                entry->bi.scrubbed = false;
        }

        return 0;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
        return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}
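
/*
 * Editor's note (descriptive comment, not part of the upstream file):
 * vmw_context_res_man() returns the context's command-buffer resource
 * manager, which is created in vmw_gb_context_init() when the device has
 * MOB support.
 */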
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->man;
}