engctx.c

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/engctx.h>
#include <core/engine.h>
#include <core/client.h>
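
/* Scan the engine's context list for a context belonging to the given
 * parent object.  On a hit, take a reference and hand the context back
 * through *pobject; returns 1 if found, 0 otherwise.  Callers must hold
 * engine->lock.
 */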
static inline int
nvkm_engctx_exists(struct nvkm_object *parent,
		   struct nvkm_engine *engine, void **pobject)
{
	struct nvkm_engctx *engctx;
	struct nvkm_object *parctx;

	list_for_each_entry(engctx, &engine->contexts, head) {
		parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
		if (parctx == parent) {
			atomic_inc(&nv_object(engctx)->refcount);
			*pobject = engctx;
			return 1;
		}
	}

	return 0;
}
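
/* Common constructor for engine contexts.  Returns 0 after creating and
 * listing a new context, 1 if an existing context for the same parent
 * was referenced instead, or a negative error code.
 */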
int
nvkm_engctx_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
		    struct nvkm_oclass *oclass, struct nvkm_object *pargpu,
		    u32 size, u32 align, u32 flags, int length, void **pobject)
{
	struct nvkm_client *client = nvkm_client(parent);
	struct nvkm_engine *engine = nv_engine(engobj);
	struct nvkm_object *engctx;
	unsigned long save;
	int ret;

	/* check if this engine already has a context for the parent object,
	 * and reference it instead of creating a new one
	 */
	spin_lock_irqsave(&engine->lock, save);
	ret = nvkm_engctx_exists(parent, engine, pobject);
	spin_unlock_irqrestore(&engine->lock, save);
	if (ret)
		return ret;

	/* create the new context, supports creating both raw objects and
	 * objects backed by instance memory
	 */
	if (size) {
		ret = nvkm_gpuobj_create_(parent, engobj, oclass,
					  NV_ENGCTX_CLASS, pargpu, size,
					  align, flags, length, pobject);
	} else {
		ret = nvkm_object_create_(parent, engobj, oclass,
					  NV_ENGCTX_CLASS, length, pobject);
	}

	engctx = *pobject;
	if (ret)
		return ret;

	/* must take the lock again and re-check a context doesn't already
	 * exist (in case of a race) - the lock had to be dropped before as
	 * it's not possible to allocate the object with it held.
	 */
	spin_lock_irqsave(&engine->lock, save);
	ret = nvkm_engctx_exists(parent, engine, pobject);
	if (ret) {
		spin_unlock_irqrestore(&engine->lock, save);
		nvkm_object_ref(NULL, &engctx);
		return ret;
	}

	if (client->vm)
		atomic_inc(&client->vm->engref[nv_engidx(engine)]);
	list_add(&nv_engctx(engctx)->head, &engine->contexts);
	nv_engctx(engctx)->addr = ~0ULL;
	spin_unlock_irqrestore(&engine->lock, save);
	return 0;
}
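
/* Tear down a context: drop any mapping, unlink it from the engine's
 * context list, release the client's VM engine reference, and destroy
 * the backing gpuobj (or the bare object if none was allocated).
 */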
void
nvkm_engctx_destroy(struct nvkm_engctx *engctx)
{
	struct nvkm_engine *engine = engctx->gpuobj.object.engine;
	struct nvkm_client *client = nvkm_client(engctx);
	unsigned long save;

	nvkm_gpuobj_unmap(&engctx->vma);

	spin_lock_irqsave(&engine->lock, save);
	list_del(&engctx->head);
	spin_unlock_irqrestore(&engine->lock, save);

	if (client->vm)
		atomic_dec(&client->vm->engref[nv_engidx(engine)]);

	if (engctx->gpuobj.size)
		nvkm_gpuobj_destroy(&engctx->gpuobj);
	else
		nvkm_object_destroy(&engctx->gpuobj.object);
}
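
/* Initialise the context's backing object, then give the parent object a
 * chance to attach it via its context_attach() hook, serialised against
 * other attach/detach calls by the parent engine's mutex.
 */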
int
nvkm_engctx_init(struct nvkm_engctx *engctx)
{
	struct nvkm_object *object = nv_object(engctx);
	struct nvkm_subdev *subdev = nv_subdev(object->engine);
	struct nvkm_object *parent;
	struct nvkm_subdev *pardev;
	int ret;

	ret = nvkm_gpuobj_init(&engctx->gpuobj);
	if (ret)
		return ret;

	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
	pardev = nv_subdev(parent->engine);
	if (nv_parent(parent)->context_attach) {
		mutex_lock(&pardev->mutex);
		ret = nv_parent(parent)->context_attach(parent, object);
		mutex_unlock(&pardev->mutex);
	}

	if (ret) {
		nv_error(parent, "failed to attach %s context, %d\n",
			 subdev->name, ret);
		return ret;
	}

	nv_debug(parent, "attached %s context\n", subdev->name);
	return 0;
}
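
/* Detach the context from its parent via context_detach(), again under
 * the parent engine's mutex, then suspend or finalise the backing
 * object.
 */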
int
nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
{
	struct nvkm_object *object = nv_object(engctx);
	struct nvkm_subdev *subdev = nv_subdev(object->engine);
	struct nvkm_object *parent;
	struct nvkm_subdev *pardev;
	int ret = 0;

	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
	pardev = nv_subdev(parent->engine);
	if (nv_parent(parent)->context_detach) {
		mutex_lock(&pardev->mutex);
		ret = nv_parent(parent)->context_detach(parent, suspend, object);
		mutex_unlock(&pardev->mutex);
	}

	if (ret) {
		nv_error(parent, "failed to detach %s context, %d\n",
			 subdev->name, ret);
		return ret;
	}

	nv_debug(parent, "detached %s context\n", subdev->name);
	return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
}
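
/* Default object methods for engines that need nothing beyond a plain
 * context: a 256-byte, 256-byte-aligned, zero-initialised allocation,
 * with dtor/init/fini forwarded to the helpers above.
 */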
int
_nvkm_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_engctx *engctx;
	int ret;

	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 256, 256,
				 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
	*pobject = nv_object(engctx);
	return ret;
}

void
_nvkm_engctx_dtor(struct nvkm_object *object)
{
	nvkm_engctx_destroy(nv_engctx(object));
}

int
_nvkm_engctx_init(struct nvkm_object *object)
{
	return nvkm_engctx_init(nv_engctx(object));
}

int
_nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
{
	return nvkm_engctx_fini(nv_engctx(object), suspend);
}
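
/* Look up the context listed at a given instance address.  On a match
 * the engine's lock is deliberately kept held (with the IRQ flags
 * stashed in engctx->save) until the caller releases it again through
 * nvkm_engctx_put().  A minimal usage sketch (hypothetical caller, not
 * part of this file):
 *
 *	object = nvkm_engctx_get(engine, inst);
 *	if (object) {
 *		... inspect the context, still under engine->lock ...
 *		nvkm_engctx_put(object);
 *	}
 */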
struct nvkm_object *
nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
{
	struct nvkm_engctx *engctx;
	unsigned long flags;

	spin_lock_irqsave(&engine->lock, flags);
	list_for_each_entry(engctx, &engine->contexts, head) {
		if (engctx->addr == addr) {
			engctx->save = flags;
			return nv_object(engctx);
		}
	}
	spin_unlock_irqrestore(&engine->lock, flags);
	return NULL;
}
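
/* Release the engine lock taken by a successful nvkm_engctx_get();
 * safe to call with a NULL object, in which case nothing was locked.
 */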
void
nvkm_engctx_put(struct nvkm_object *object)
{
	if (object) {
		struct nvkm_engine *engine = nv_engine(object->engine);
		struct nvkm_engctx *engctx = nv_engctx(object);
		spin_unlock_irqrestore(&engine->lock, engctx->save);
	}
}