mali_kbase.h

/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#ifndef _KBASE_H_
#define _KBASE_H_

#include <mali_malisw.h>
#include <mali_kbase_debug.h>

#include <asm/page.h>
#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "mali_base_kernel.h"
#include <mali_kbase_uku.h>
#include <mali_kbase_linux.h>

#include "mali_kbase_strings.h"
#include "mali_kbase_pm.h"
#include "mali_kbase_mem_lowlevel.h"
#include "mali_kbase_defs.h"
#include "mali_kbase_trace_timeline.h"
#include "mali_kbase_js.h"
#include "mali_kbase_mem.h"
#include "mali_kbase_utility.h"
#include "mali_kbase_gpu_memory_debugfs.h"
#include "mali_kbase_mem_profile_debugfs.h"
#include "mali_kbase_debug_job_fault.h"
#include "mali_kbase_jd_debugfs.h"
#include "mali_kbase_gpuprops.h"
#include "mali_kbase_jm.h"
#include "mali_kbase_vinstr.h"
#include "mali_kbase_ipa.h"

#ifdef CONFIG_GPU_TRACEPOINTS
#include <trace/events/gpu.h>
#endif
/**
 * @page page_base_kernel_main Kernel-side Base (KBase) APIs
 *
 * The Kernel-side Base (KBase) APIs are divided up as follows:
 * - @subpage page_kbase_js_policy
 */

/**
 * @defgroup base_kbase_api Kernel-side Base (KBase) APIs
 */

struct kbase_device *kbase_device_alloc(void);

/*
 * note: the configuration attributes member of kbdev needs to have
 * been set up before calling kbase_device_init
 */

/*
 * API to acquire the device list semaphore and
 * return a pointer to the device list head
 */
const struct list_head *kbase_dev_list_get(void);
/* API to release the device list semaphore */
void kbase_dev_list_put(const struct list_head *dev_list);

int kbase_device_init(struct kbase_device * const kbdev);
void kbase_device_term(struct kbase_device *kbdev);
void kbase_device_free(struct kbase_device *kbdev);
int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);

/* Needed for gator integration and for reporting vsync information */
struct kbase_device *kbase_find_device(int minor);
void kbase_release_device(struct kbase_device *kbdev);

void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value);
u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control);

struct kbase_context *
kbase_create_context(struct kbase_device *kbdev, bool is_compat);
void kbase_destroy_context(struct kbase_context *kctx);
int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags);

int kbase_jd_init(struct kbase_context *kctx);
void kbase_jd_exit(struct kbase_context *kctx);
#ifdef BASE_LEGACY_UK6_SUPPORT
int kbase_jd_submit(struct kbase_context *kctx,
		const struct kbase_uk_job_submit *submit_data,
		int uk6_atom);
#else
int kbase_jd_submit(struct kbase_context *kctx,
		const struct kbase_uk_job_submit *submit_data);
#endif

/**
 * kbase_jd_done_worker - Handle a job completion
 * @data: a &struct work_struct
 *
 * This function requeues the job from the runpool (if it was soft-stopped or
 * removed from NEXT registers).
 *
 * Removes it from the system if it finished/failed/was cancelled.
 *
 * Resolves dependencies to add dependent jobs to the context, potentially
 * starting them if necessary (which may add more references to the context).
 *
 * Releases the reference to the context from the no-longer-running job.
 *
 * Handles retrying submission outside of IRQ context if it failed from within
 * IRQ context.
 */
void kbase_jd_done_worker(struct work_struct *data);
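/*
 * Illustrative sketch (not part of this header): kbase_jd_done_worker() is an
 * ordinary workqueue handler, so a completion path is expected to queue it
 * roughly as below. The work member and workqueue names used here are
 * assumptions for illustration only.
 *
 *	INIT_WORK(&katom->work, kbase_jd_done_worker);
 *	queue_work(kctx->jctx.job_done_wq, &katom->work);
 */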
void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
		kbasep_js_atom_done_code done_code);
void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
void kbase_jd_zap_context(struct kbase_context *kctx);
bool jd_done_nolock(struct kbase_jd_atom *katom,
		struct list_head *completed_jobs_ctx);
void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
bool jd_submit_atom(struct kbase_context *kctx,
		const struct base_jd_atom_v2 *user_atom,
		struct kbase_jd_atom *katom);
void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom);

void kbase_job_done(struct kbase_device *kbdev, u32 done);

void kbase_gpu_cacheclean(struct kbase_device *kbdev,
		struct kbase_jd_atom *katom);

/**
 * kbase_job_slot_ctx_priority_check_locked() - Check for lower priority atoms
 *                                              and soft stop them
 * @kctx: Pointer to context to check.
 * @katom: Pointer to priority atom.
 *
 * Atoms from @kctx on the same job slot as @katom, which have lower priority
 * than @katom, will be soft stopped and put back in the queue, so that atoms
 * with higher priority can run.
 *
 * The js_data.runpool_irq.lock must be held when calling this function.
 */
void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
		struct kbase_jd_atom *katom);
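/*
 * Illustrative sketch (not part of this header): since the caller must hold
 * js_data.runpool_irq.lock, a call is expected to look roughly like the
 * following. The exact spelling of the lock below is an assumption for
 * illustration.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
 *	kbase_job_slot_ctx_priority_check_locked(kctx, katom);
 *	spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
 */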
void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
		struct kbase_jd_atom *target_katom);
void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
		struct kbase_jd_atom *target_katom, u32 sw_flags);
void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
		struct kbase_jd_atom *target_katom);
void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
		u16 core_reqs, struct kbase_jd_atom *target_katom);
void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
		struct kbase_jd_atom *target_katom);

void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
int kbase_event_pending(struct kbase_context *ctx);
int kbase_event_init(struct kbase_context *kctx);
void kbase_event_close(struct kbase_context *kctx);
void kbase_event_cleanup(struct kbase_context *kctx);
void kbase_event_wakeup(struct kbase_context *kctx);

int kbase_process_soft_job(struct kbase_jd_atom *katom);
int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
void kbase_finish_soft_job(struct kbase_jd_atom *katom);
void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom);

bool kbase_replay_process(struct kbase_jd_atom *katom);

enum hrtimer_restart kbasep_soft_event_timeout_worker(struct hrtimer *timer);
void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
int kbasep_read_soft_event_status(
		struct kbase_context *kctx, u64 evt, unsigned char *status);
int kbasep_write_soft_event_status(
		struct kbase_context *kctx, u64 evt, unsigned char new_status);

/* API used internally for register access. Contains validation and tracing */
void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value);
int kbase_device_trace_buffer_install(
		struct kbase_context *kctx, u32 *tb, size_t size);
void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx);

/* API to be ported per OS; it only needs to do the raw register access */
void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value);
u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset);
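/*
 * Illustrative sketch (not part of this header): a per-OS port of the two
 * functions above only needs raw MMIO access. Assuming the device structure
 * exposes an ioremapped register base (called "reg" here for illustration),
 * a Linux port could be as simple as:
 *
 *	void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
 *	{
 *		writel(value, kbdev->reg + offset);
 *	}
 *
 *	u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
 *	{
 *		return readl(kbdev->reg + offset);
 *	}
 */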
void kbasep_as_do_poke(struct work_struct *work);

/** Returns the name associated with a Mali exception code
 *
 * This function is called from the interrupt handler when a GPU fault occurs.
 * It reports the details of the fault using KBASE_DEBUG_PRINT_WARN.
 *
 * @param[in] kbdev          The kbase device that the GPU fault occurred from.
 * @param[in] exception_code The exception code.
 * @return The name associated with the exception code.
 */
const char *kbase_exception_name(struct kbase_device *kbdev,
		u32 exception_code);

/**
 * Check whether a system suspend is in progress, or has already completed
 *
 * The caller should ensure that either kbdev->pm.active_count_lock is held,
 * or a dmb was executed recently (to ensure the value read is up to date).
 * However, without a lock the value could change afterwards.
 *
 * @return false if a suspend is not in progress
 * @return true otherwise
 */
static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
{
	return kbdev->pm.suspending;
}
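/*
 * Illustrative sketch (not part of this header): a typical use of the helper
 * above is to skip non-essential work while a suspend is pending, e.g.:
 *
 *	if (kbase_pm_is_suspending(kbdev))
 *		return;
 */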
/**
 * Return the atom's ID, as was originally supplied by userspace in
 * base_jd_atom_v2::atom_number
 */
static inline int kbase_jd_atom_id(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	int result;

	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(katom);
	KBASE_DEBUG_ASSERT(katom->kctx == kctx);

	result = katom - &kctx->jctx.atoms[0];
	KBASE_DEBUG_ASSERT(result >= 0 && result < BASE_JD_ATOM_COUNT);
	return result;
}
/**
 * kbase_jd_atom_from_id - Return the atom structure for the given atom ID
 * @kctx: Context pointer
 * @id: ID of atom to retrieve
 *
 * Return: Pointer to struct kbase_jd_atom associated with the supplied ID
 */
static inline struct kbase_jd_atom *kbase_jd_atom_from_id(
		struct kbase_context *kctx, int id)
{
	return &kctx->jctx.atoms[id];
}
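/*
 * Illustrative sketch (not part of this header): the two helpers above are
 * inverses over a context's atom table, e.g.:
 *
 *	int id = kbase_jd_atom_id(kctx, katom);
 *	struct kbase_jd_atom *same = kbase_jd_atom_from_id(kctx, id);
 *
 *	KBASE_DEBUG_ASSERT(same == katom);
 */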
/**
 * Initialize the disjoint state
 *
 * The disjoint event count and state are both set to zero.
 *
 * Disjoint functions usage:
 *
 * The disjoint event count should be incremented whenever a disjoint event occurs.
 *
 * There are several cases which are regarded as disjoint behavior. Rather than just incrementing
 * the counter during disjoint events, we also increment the counter when jobs may be affected
 * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state.
 *
 * Disjoint state is entered during GPU reset and for the entire time that an atom is replaying
 * (as part of the replay workaround). Increasing the disjoint state also increases the count of
 * disjoint events.
 *
 * The disjoint state is then used to increase the count of disjoint events during job submission
 * and job completion. Any atom submitted or completed while the disjoint state is greater than
 * zero is regarded as a disjoint event.
 *
 * The disjoint event counter is also incremented immediately whenever a job is soft stopped
 * and during context creation.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_init(struct kbase_device *kbdev);

/**
 * Increase the count of disjoint events.
 * Called when a disjoint event has happened.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_event(struct kbase_device *kbdev);

/**
 * Increase the count of disjoint events only if the GPU is in a disjoint state
 *
 * This should be called when something happens which could be disjoint if the GPU
 * is in a disjoint state. The state refcount keeps track of this.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_event_potential(struct kbase_device *kbdev);

/**
 * Returns the count of disjoint events
 *
 * @param kbdev The kbase device
 * @return the count of disjoint events
 */
u32 kbase_disjoint_event_get(struct kbase_device *kbdev);

/**
 * Increment the refcount state indicating that the GPU is in a disjoint state.
 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
 * Eventually, after the disjoint state has completed, @ref kbase_disjoint_state_down
 * should be called.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_state_up(struct kbase_device *kbdev);

/**
 * Decrement the refcount state
 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
 *
 * Called after @ref kbase_disjoint_state_up once the disjoint state is over.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_state_down(struct kbase_device *kbdev);
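/*
 * Illustrative sketch (not part of this header): the disjoint-state pattern
 * described above brackets a period such as a GPU reset or replay with
 * state_up()/state_down(), and marks possibly-affected work in between:
 *
 *	kbase_disjoint_state_up(kbdev);
 *	... reset or replay work; submissions and completions during this
 *	    window are counted via kbase_disjoint_event_potential(kbdev) ...
 *	kbase_disjoint_state_down(kbdev);
 */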
/**
 * If a job is soft stopped and the number of contexts is >= this value
 * it is reported as a disjoint event
 */
#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2

#if !defined(UINT64_MAX)
#define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#endif

#if KBASE_TRACE_ENABLE
void kbasep_trace_debugfs_init(struct kbase_device *kbdev);

#ifndef CONFIG_MALI_SYSTEM_TRACE
/** Add trace values about a job-slot
 *
 * @note Any functions called through this macro will still be evaluated in
 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
 * functions called to get the parameters supplied to this macro must:
 * - be static or static inline, and
 * - just return 0 and have no other statements present in the body.
 */
#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \
	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
			KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, 0)
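/*
 * Illustrative sketch (not part of this header): macro arguments are still
 * evaluated even when tracing is compiled out, so a helper used to supply a
 * parameter should follow the note above. A minimally compliant helper and
 * call site could look like the following; the helper name and the trace
 * code used here are hypothetical:
 *
 *	static inline int get_traced_jobslot(void)
 *	{
 *		return 0;
 *	}
 *
 *	KBASE_TRACE_ADD_SLOT(kbdev, JM_SUBMIT, kctx, katom, katom->jc,
 *			get_traced_jobslot());
 */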
/** Add trace values about a job-slot, with info
 *
 * @note Any functions called through this macro will still be evaluated in
 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
 * functions called to get the parameters supplied to this macro must:
 * - be static or static inline, and
 * - just return 0 and have no other statements present in the body.
 */
#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \
	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
			KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, info_val)

/** Add trace values about a ctx refcount
 *
 * @note Any functions called through this macro will still be evaluated in
 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
 * functions called to get the parameters supplied to this macro must:
 * - be static or static inline, and
 * - just return 0 and have no other statements present in the body.
 */
#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \
	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
			KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, 0)

/** Add trace values about a ctx refcount, and info
 *
 * @note Any functions called through this macro will still be evaluated in
 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
 * functions called to get the parameters supplied to this macro must:
 * - be static or static inline, and
 * - just return 0 and have no other statements present in the body.
 */
#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \
	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
			KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, info_val)

/** Add trace values (no slot or refcount)
 *
 * @note Any functions called through this macro will still be evaluated in
 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
 * functions called to get the parameters supplied to this macro must:
 * - be static or static inline, and
 * - just return 0 and have no other statements present in the body.
 */
#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val) \
	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
			0, 0, 0, info_val)

/** Clear the trace */
#define KBASE_TRACE_CLEAR(kbdev) \
	kbasep_trace_clear(kbdev)

/** Dump the slot trace */
#define KBASE_TRACE_DUMP(kbdev) \
	kbasep_trace_dump(kbdev)

/** PRIVATE - do not use directly. Use KBASE_TRACE_ADD() instead */
void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val);
/** PRIVATE - do not use directly. Use KBASE_TRACE_CLEAR() instead */
void kbasep_trace_clear(struct kbase_device *kbdev);
#else /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
/* Dispatch kbase trace events as system trace events */
#include <mali_linux_kbase_trace.h>
#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
	trace_mali_##code(jobslot, 0)

#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
	trace_mali_##code(jobslot, info_val)

#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
	trace_mali_##code(refcount, 0)

#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
	trace_mali_##code(refcount, info_val)

#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\
	trace_mali_##code(gpu_addr, info_val)

#define KBASE_TRACE_CLEAR(kbdev)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(0);\
	} while (0)
#define KBASE_TRACE_DUMP(kbdev)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(0);\
	} while (0)
#endif /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
#else /* KBASE_TRACE_ENABLE */
#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(code);\
		CSTD_UNUSED(ctx);\
		CSTD_UNUSED(katom);\
		CSTD_UNUSED(gpu_addr);\
		CSTD_UNUSED(jobslot);\
	} while (0)

#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(code);\
		CSTD_UNUSED(ctx);\
		CSTD_UNUSED(katom);\
		CSTD_UNUSED(gpu_addr);\
		CSTD_UNUSED(jobslot);\
		CSTD_UNUSED(info_val);\
		CSTD_NOP(0);\
	} while (0)

#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(code);\
		CSTD_UNUSED(ctx);\
		CSTD_UNUSED(katom);\
		CSTD_UNUSED(gpu_addr);\
		CSTD_UNUSED(refcount);\
		CSTD_NOP(0);\
	} while (0)

#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(code);\
		CSTD_UNUSED(ctx);\
		CSTD_UNUSED(katom);\
		CSTD_UNUSED(gpu_addr);\
		CSTD_UNUSED(refcount);\
		CSTD_UNUSED(info_val);\
		CSTD_NOP(0);\
	} while (0)

#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(code);\
		CSTD_UNUSED(ctx);\
		CSTD_UNUSED(katom);\
		CSTD_UNUSED(gpu_addr);\
		CSTD_UNUSED(info_val);\
		CSTD_NOP(0);\
	} while (0)

#define KBASE_TRACE_CLEAR(kbdev)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(0);\
	} while (0)

#define KBASE_TRACE_DUMP(kbdev)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(0);\
	} while (0)
#endif /* KBASE_TRACE_ENABLE */
/** PRIVATE - do not use directly. Use KBASE_TRACE_DUMP() instead */
void kbasep_trace_dump(struct kbase_device *kbdev);

#ifdef CONFIG_MALI_DEBUG
/**
 * kbase_set_driver_inactive - Force driver to go inactive
 * @kbdev:    Device pointer
 * @inactive: true if driver should go inactive, false otherwise
 *
 * Forcing the driver inactive will cause all future IOCTLs to wait until the
 * driver is made active again. This is intended solely for the use of tests
 * which require that no jobs are running while the test executes.
 */
void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive);
#endif /* CONFIG_MALI_DEBUG */

#endif /* _KBASE_H_ */