/* mali_kbase_event.c */
/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_debug.h>
#include <mali_kbase_tlstream.h>

static struct base_jd_udata kbase_event_process(struct kbase_context *kctx,
		struct kbase_jd_atom *katom)
{
	struct base_jd_udata data;

	lockdep_assert_held(&kctx->jctx.lock);

	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(katom != NULL);
	KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);

	data = katom->udata;

	KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight));

	kbase_tlstream_tl_nret_atom_ctx(katom, kctx);
	kbase_tlstream_tl_del_atom(katom);

	katom->status = KBASE_JD_ATOM_STATE_UNUSED;

	wake_up(&katom->completed);

	return data;
}

int kbase_event_pending(struct kbase_context *ctx)
{
	KBASE_DEBUG_ASSERT(ctx);

	return (atomic_read(&ctx->event_count) != 0) ||
			(atomic_read(&ctx->event_closed) != 0);
}
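
/*
 * Example (illustrative sketch, not part of this file): a poll-style
 * caller could pair kbase_event_pending() with the context's event wait
 * queue, which kbase_event_wakeup() signals. The wait-queue field name
 * below is an assumption; the real handler lives elsewhere in the driver.
 *
 *	static unsigned int example_poll(struct file *filp, poll_table *wait)
 *	{
 *		struct kbase_context *kctx = filp->private_data;
 *
 *		poll_wait(filp, &kctx->event_queue, wait);	// assumed field
 *		if (kbase_event_pending(kctx))
 *			return POLLIN | POLLRDNORM;
 *		return 0;
 *	}
 */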

int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
{
	struct kbase_jd_atom *atom;

	KBASE_DEBUG_ASSERT(ctx);

	mutex_lock(&ctx->event_mutex);

	if (list_empty(&ctx->event_list)) {
		if (!atomic_read(&ctx->event_closed)) {
			mutex_unlock(&ctx->event_mutex);
			return -1;
		}

		/* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */
		mutex_unlock(&ctx->event_mutex);
		uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
		memset(&uevent->udata, 0, sizeof(uevent->udata));
		dev_dbg(ctx->kbdev->dev,
				"event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
				BASE_JD_EVENT_DRV_TERMINATED);
		return 0;
	}

	/* normal event processing */
	atomic_dec(&ctx->event_count);
	atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]);
	list_del(ctx->event_list.next);

	mutex_unlock(&ctx->event_mutex);

	dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
	uevent->event_code = atom->event_code;
	uevent->atom_number = (atom - ctx->jctx.atoms);

	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
		kbase_jd_free_external_resources(atom);

	mutex_lock(&ctx->jctx.lock);
	uevent->udata = kbase_event_process(ctx, atom);
	mutex_unlock(&ctx->jctx.lock);

	return 0;
}
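
/*
 * Example (illustrative sketch): a hypothetical caller draining the
 * event queue. kbase_event_dequeue() returns 0 when an event (or the
 * synthetic DRV_TERMINATED event) was written to *uevent, and -1 when
 * the queue is empty but still open.
 *
 *	struct base_jd_event_v2 uevent;
 *
 *	while (kbase_event_dequeue(kctx, &uevent) == 0) {
 *		if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED)
 *			break;			// context is shutting down
 *		handle_event(&uevent);		// hypothetical consumer
 *	}
 */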

/**
 * kbase_event_process_noreport_worker - Worker for processing atoms that do not
 *                                       return an event but do have external
 *                                       resources
 * @data: Work structure
 */
static void kbase_event_process_noreport_worker(struct work_struct *data)
{
	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
			work);
	struct kbase_context *kctx = katom->kctx;

	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
		kbase_jd_free_external_resources(katom);

	mutex_lock(&kctx->jctx.lock);
	kbase_event_process(kctx, katom);
	mutex_unlock(&kctx->jctx.lock);
}

/**
 * kbase_event_process_noreport - Process atoms that do not return an event
 * @kctx:  Context pointer
 * @katom: Atom to be processed
 *
 * Atoms that do not have external resources will be processed immediately.
 * Atoms that do have external resources will be processed on a workqueue, in
 * order to avoid locking issues.
 */
static void kbase_event_process_noreport(struct kbase_context *kctx,
		struct kbase_jd_atom *katom)
{
	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
		INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
		queue_work(kctx->event_workq, &katom->work);
	} else {
		kbase_event_process(kctx, katom);
	}
}

/**
 * kbase_event_coalesce - Move pending events to the main event list
 * @kctx: Context pointer
 *
 * kctx->event_list and kctx->event_coalesce_count must be protected
 * by a lock unless this is the last thread using them
 * (and the lock is about to be destroyed).
 *
 * Return: The number of pending events moved to the main event list
 */
static int kbase_event_coalesce(struct kbase_context *kctx)
{
	const int event_count = kctx->event_coalesce_count;

	/* Join the list of pending events onto the tail of the main list
	   and reset it */
	list_splice_tail_init(&kctx->event_coalesce_list, &kctx->event_list);
	kctx->event_coalesce_count = 0;

	/* Return the number of events moved */
	return event_count;
}
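
/*
 * The expected calling pattern (as used by kbase_event_post() and
 * kbase_event_cleanup() in this file): the caller adds the returned
 * count to event_count, under the lock that protects the lists, except
 * during teardown when no other thread can race.
 *
 *	mutex_lock(&kctx->event_mutex);
 *	event_count = kbase_event_coalesce(kctx);
 *	atomic_add(event_count, &kctx->event_count);
 *	mutex_unlock(&kctx->event_mutex);
 */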

void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom)
{
	if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) {
		if (atom->event_code == BASE_JD_EVENT_DONE) {
			/* Don't report the event */
			kbase_event_process_noreport(ctx, atom);
			return;
		}
	}

	if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) {
		/* Don't report the event */
		kbase_event_process_noreport(ctx, atom);
		return;
	}

	if (atom->core_req & BASE_JD_REQ_EVENT_COALESCE) {
		/* Don't report the event until other event(s) have completed */
		mutex_lock(&ctx->event_mutex);
		list_add_tail(&atom->dep_item[0], &ctx->event_coalesce_list);
		++ctx->event_coalesce_count;
		mutex_unlock(&ctx->event_mutex);
	} else {
		/* Report the event and any pending events now */
		int event_count = 1;

		mutex_lock(&ctx->event_mutex);
		event_count += kbase_event_coalesce(ctx);
		list_add_tail(&atom->dep_item[0], &ctx->event_list);
		atomic_add(event_count, &ctx->event_count);
		mutex_unlock(&ctx->event_mutex);

		kbase_event_wakeup(ctx);
	}
}
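
/*
 * Summary of the dispatch above (derived from the code, for reference):
 *
 *	BASE_JD_REQ_EVENT_ONLY_ON_FAILURE with BASE_JD_EVENT_DONE
 *		-> processed silently, no event reported
 *	BASEP_JD_REQ_EVENT_NEVER
 *		-> processed silently, no event reported
 *	BASE_JD_REQ_EVENT_COALESCE
 *		-> queued on event_coalesce_list; reported together with the
 *		   next non-coalesced event
 *	otherwise
 *		-> added to event_list along with any coalesced events, and
 *		   waiters are woken via kbase_event_wakeup()
 */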

void kbase_event_close(struct kbase_context *kctx)
{
	mutex_lock(&kctx->event_mutex);
	atomic_set(&kctx->event_closed, true);
	mutex_unlock(&kctx->event_mutex);
	kbase_event_wakeup(kctx);
}

int kbase_event_init(struct kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(kctx);

	INIT_LIST_HEAD(&kctx->event_list);
	INIT_LIST_HEAD(&kctx->event_coalesce_list);
	mutex_init(&kctx->event_mutex);
	atomic_set(&kctx->event_count, 0);
	kctx->event_coalesce_count = 0;
	atomic_set(&kctx->event_closed, false);
	kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1);

	if (kctx->event_workq == NULL)
		return -EINVAL;

	return 0;
}
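
/*
 * Example (illustrative sketch): the expected lifecycle around context
 * setup and teardown. The callers are assumed to live elsewhere in the
 * driver; the ordering shown is an assumption based on this file.
 *
 *	if (kbase_event_init(kctx))
 *		goto fail;		// no event workqueue available
 *	...
 *	kbase_event_close(kctx);	// readers now see DRV_TERMINATED
 *	kbase_event_cleanup(kctx);	// drain anything still queued
 */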

void kbase_event_cleanup(struct kbase_context *kctx)
{
	int event_count;

	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(kctx->event_workq);

	flush_workqueue(kctx->event_workq);
	destroy_workqueue(kctx->event_workq);

	/* We use kbase_event_dequeue to remove the remaining events as that
	 * deals with all the cleanup needed for the atoms.
	 *
	 * Note: use of kctx->event_list without a lock is safe because this
	 * must be the last thread using it (the lock is about to be
	 * destroyed)
	 */
	event_count = kbase_event_coalesce(kctx);
	atomic_add(event_count, &kctx->event_count);

	while (!list_empty(&kctx->event_list)) {
		struct base_jd_event_v2 event;

		kbase_event_dequeue(kctx, &event);
	}
}