/*
 *
 * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * HW access job manager common APIs
 */

#ifndef _KBASE_HWACCESS_JM_H_
#define _KBASE_HWACCESS_JM_H_

/**
 * kbase_backend_run_atom() - Run an atom on the GPU
 * @kbdev: Device pointer
 * @katom: Atom to run
 *
 * Caller must hold the HW access lock
 */
void kbase_backend_run_atom(struct kbase_device *kbdev,
        struct kbase_jd_atom *katom);

/**
 * kbase_backend_find_free_address_space() - Find a free address space.
 * @kbdev: Device pointer
 * @kctx: Context pointer
 *
 * If no address spaces are currently free, then this function can evict an
 * idle context from the runpool, freeing up the address space it was using.
 *
 * The address space is marked as in use. The caller must either assign a
 * context to it using kbase_backend_use_ctx(), or release it using
 * kbase_backend_release_free_address_space().
 *
 * Return: Number of the free address space, or KBASEP_AS_NR_INVALID if none
 * available
 */
int kbase_backend_find_free_address_space(struct kbase_device *kbdev,
        struct kbase_context *kctx);

/**
 * kbase_backend_release_free_address_space() - Release an address space.
 * @kbdev: Device pointer
 * @as_nr: Address space to release
 *
 * The address space must have been returned by
 * kbase_backend_find_free_address_space().
 */
void kbase_backend_release_free_address_space(struct kbase_device *kbdev,
        int as_nr);

/**
 * kbase_backend_use_ctx() - Activate a currently unscheduled context, using the
 * provided address space.
 * @kbdev: Device pointer
 * @kctx: Context pointer. May be NULL
 * @as_nr: Free address space to use
 *
 * kbase_gpu_next_job() will pull atoms from the active context.
 *
 * Return: true if successful, false if the ASID was not assigned. If
 * kctx->as_pending is true then ASID assignment will complete at some point
 * in the future and scheduling will be restarted; otherwise no ASIDs are
 * available.
 */
bool kbase_backend_use_ctx(struct kbase_device *kbdev,
        struct kbase_context *kctx,
        int as_nr);
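
/*
 * Illustrative sketch only (not part of this interface): a caller that has
 * obtained an address space from kbase_backend_find_free_address_space() must
 * either assign a context to it or hand it back, as described above. The
 * required locking and the decision of whether to activate a context
 * (want_ctx_active below) are hypothetical and left to the caller.
 *
 *      int as_nr = kbase_backend_find_free_address_space(kbdev, kctx);
 *
 *      if (as_nr == KBASEP_AS_NR_INVALID)
 *              return;
 *
 *      if (want_ctx_active)
 *              kbase_backend_use_ctx(kbdev, kctx, as_nr);
 *      else
 *              kbase_backend_release_free_address_space(kbdev, as_nr);
 */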

/**
 * kbase_backend_use_ctx_sched() - Activate a context.
 * @kbdev: Device pointer
 * @kctx: Context pointer
 *
 * kbase_gpu_next_job() will pull atoms from the active context.
 *
 * The context must already be scheduled and assigned to an address space. If
 * the context is not scheduled, then kbase_backend_use_ctx() should be used
 * instead.
 *
 * Caller must hold runpool_irq.lock
 *
 * Return: true if context is now active, false otherwise (i.e. if the context
 * does not have an address space assigned)
 */
bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
        struct kbase_context *kctx);

/**
 * kbase_backend_release_ctx_irq - Release a context from the GPU. This will
 * de-assign the assigned address space.
 * @kbdev: Device pointer
 * @kctx: Context pointer
 *
 * Caller must hold as->transaction_mutex and runpool_irq.lock
 */
void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
        struct kbase_context *kctx);

/**
 * kbase_backend_release_ctx_noirq - Release a context from the GPU. This will
 * de-assign the assigned address space.
 * @kbdev: Device pointer
 * @kctx: Context pointer
 *
 * Caller must hold as->transaction_mutex
 *
 * This function must perform any operations that could not be performed in IRQ
 * context by kbase_backend_release_ctx_irq().
 */
void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
        struct kbase_context *kctx);

/**
 * kbase_backend_complete_wq() - Perform backend-specific actions required on
 * completing an atom.
 * @kbdev: Device pointer
 * @katom: Pointer to the atom to complete
 *
 * This function should only be called from kbase_jd_done_worker() or
 * js_return_worker().
 */
void kbase_backend_complete_wq(struct kbase_device *kbdev,
        struct kbase_jd_atom *katom);

/**
 * kbase_backend_complete_wq_post_sched - Perform backend-specific actions
 * required on completing an atom, after any scheduling has taken place.
 * @kbdev: Device pointer
 * @core_req: Core requirements of atom
 * @affinity: Affinity of atom
 * @coreref_state: Coreref state of atom
 *
 * This function should only be called from kbase_jd_done_worker() or
 * js_return_worker().
 */
void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
        base_jd_core_req core_req, u64 affinity,
        enum kbase_atom_coreref_state coreref_state);
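
/*
 * Illustrative sketch only (not part of this interface): the kernel-doc above
 * suggests that a done worker calls kbase_backend_complete_wq() when an atom
 * completes and, once any rescheduling has been done, calls
 * kbase_backend_complete_wq_post_sched() with the atom's saved requirements.
 * The exact ordering and the use of the atom's fields here are assumptions,
 * not a definitive description of kbase_jd_done_worker().
 *
 *      kbase_backend_complete_wq(kbdev, katom);
 *      (scheduling of other atoms happens here)
 *      kbase_backend_complete_wq_post_sched(kbdev, katom->core_req,
 *              katom->affinity, katom->coreref_state);
 */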

/**
 * kbase_backend_reset() - The GPU is being reset. Cancel all jobs on the GPU
 * and remove any others from the ringbuffers.
 * @kbdev: Device pointer
 * @end_timestamp: Timestamp of reset
 */
void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp);

/**
 * kbase_backend_inspect_head() - Return the atom currently at the head of slot
 * @js
 * @kbdev: Device pointer
 * @js: Job slot to inspect
 *
 * Return: Atom currently at the head of slot @js, or NULL
 */
struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
        int js);

/**
 * kbase_backend_inspect_tail - Return the atom currently at the tail of slot
 * @js
 * @kbdev: Device pointer
 * @js: Job slot to inspect
 *
 * Return: Atom currently at the tail of slot @js, or NULL
 */
struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
        int js);

/**
 * kbase_backend_nr_atoms_on_slot() - Return the number of atoms currently on a
 * slot.
 * @kbdev: Device pointer
 * @js: Job slot to inspect
 *
 * Return: Number of atoms currently on slot @js
 */
int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js);

/**
 * kbase_backend_nr_atoms_submitted() - Return the number of atoms on a slot
 * that are currently on the GPU.
 * @kbdev: Device pointer
 * @js: Job slot to inspect
 *
 * Return: Number of atoms on slot @js that are currently on the GPU.
 */
int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js);

/**
 * kbase_backend_ctx_count_changed() - Number of contexts ready to submit jobs
 * has changed.
 * @kbdev: Device pointer
 *
 * Perform any required backend-specific actions (e.g. starting/stopping
 * scheduling timers).
 */
void kbase_backend_ctx_count_changed(struct kbase_device *kbdev);

/**
 * kbase_backend_slot_free() - Return the number of jobs that can currently be
 * submitted to slot @js.
 * @kbdev: Device pointer
 * @js: Job slot to inspect
 *
 * Return: Number of jobs that can currently be submitted to slot @js.
 */
int kbase_backend_slot_free(struct kbase_device *kbdev, int js);

/**
 * kbase_job_check_leave_disjoint - potentially leave disjoint state
 * @kbdev: kbase device
 * @target_katom: atom which is finishing
 *
 * Work out whether to leave disjoint state when finishing an atom that
 * originated from kbase_job_check_enter_disjoint().
 */
void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
        struct kbase_jd_atom *target_katom);

/**
 * kbase_backend_jm_kill_jobs_from_kctx - Kill all jobs that are currently
 * running from a context
 * @kctx: Context pointer
 *
 * This is used in response to a page fault to remove all jobs from the faulting
 * context from the hardware.
 */
void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx);

/**
 * kbase_jm_wait_for_zero_jobs - Wait for a context to have zero jobs running,
 * and to be descheduled.
 * @kctx: Context pointer
 *
 * This should be called following kbase_js_zap_context(), to ensure the
 * context can be safely destroyed.
 */
void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx);
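
/*
 * Illustrative sketch only: the teardown ordering implied by the comment
 * above. kbase_js_zap_context() is declared elsewhere in the driver;
 * destroy_ctx() is a hypothetical stand-in for whatever destruction the
 * caller performs once the context is idle and descheduled.
 *
 *      kbase_js_zap_context(kctx);
 *      kbase_jm_wait_for_zero_jobs(kctx);
 *      destroy_ctx(kctx);
 */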

/**
 * kbase_backend_get_current_flush_id - Return the current flush ID
 *
 * @kbdev: Device pointer
 *
 * Return: the current flush ID to be recorded for each job chain
 */
u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev);

#if KBASE_GPU_RESET_EN
/**
 * kbase_prepare_to_reset_gpu - Prepare for resetting the GPU.
 * @kbdev: Device pointer
 *
 * This function just soft-stops all the slots to ensure that as many jobs as
 * possible are saved.
 *
 * Return: a boolean which should be interpreted as follows:
 * - true  - Prepared for reset, kbase_reset_gpu() should be called.
 * - false - Another thread is performing a reset, kbase_reset_gpu() should
 *   not be called.
 */
bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev);

/**
 * kbase_reset_gpu - Reset the GPU
 * @kbdev: Device pointer
 *
 * This function should be called after kbase_prepare_to_reset_gpu() if it
 * returns true. It should never be called without a corresponding call to
 * kbase_prepare_to_reset_gpu().
 *
 * After this function is called (or not called if kbase_prepare_to_reset_gpu()
 * returned false), the caller should wait for kbdev->reset_waitq to be
 * signalled to know when the reset has completed.
 */
void kbase_reset_gpu(struct kbase_device *kbdev);
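
/*
 * Illustrative sketch only: the reset sequence implied by the comments above.
 *
 *      if (kbase_prepare_to_reset_gpu(kbdev))
 *              kbase_reset_gpu(kbdev);
 *
 * Whether or not this thread performed the reset, the caller should then wait
 * for kbdev->reset_waitq to be signalled before assuming the reset has
 * completed.
 */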

/**
 * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU.
 * @kbdev: Device pointer
 *
 * This function just soft-stops all the slots to ensure that as many jobs as
 * possible are saved.
 *
 * Return: a boolean which should be interpreted as follows:
 * - true  - Prepared for reset, kbase_reset_gpu_locked() should be called.
 * - false - Another thread is performing a reset, kbase_reset_gpu_locked()
 *   should not be called.
 */
bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev);

/**
 * kbase_reset_gpu_locked - Reset the GPU
 * @kbdev: Device pointer
 *
 * This function should be called after kbase_prepare_to_reset_gpu_locked() if
 * it returns true. It should never be called without a corresponding call to
 * kbase_prepare_to_reset_gpu_locked().
 *
 * After this function is called (or not called if
 * kbase_prepare_to_reset_gpu_locked() returned false), the caller should wait
 * for kbdev->reset_waitq to be signalled to know when the reset has completed.
 */
void kbase_reset_gpu_locked(struct kbase_device *kbdev);
#endif /* KBASE_GPU_RESET_EN */

/**
 * kbase_job_slot_hardstop - Hard-stop the specified job slot
 * @kctx: The kbase context that contains the job(s) that should
 * be hard-stopped
 * @js: The job slot to hard-stop
 * @target_katom: The job that should be hard-stopped (or NULL for all
 * jobs from the context)
 * Context:
 * The job slot lock must be held when calling this function.
 */
void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
        struct kbase_jd_atom *target_katom);

#endif /* _KBASE_HWACCESS_JM_H_ */