mali_kbase_vinstr.c
/*
 *
 * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include <linux/anon_inodes.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <mali_kbase.h>
#include <mali_kbase_hwcnt_reader.h>
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_tlstream.h>

/*****************************************************************************/

/* Hwcnt reader API version */
#define HWCNT_READER_API 1

/* The number of nanoseconds in a second. */
#define NSECS_IN_SEC 1000000000ull /* ns */

/* The time resolution of dumping service. */
#define DUMPING_RESOLUTION 500000ull /* ns */

/* The maximal supported number of dumping buffers. */
#define MAX_BUFFER_COUNT 32

/* Size and number of hw counters blocks. */
#define NR_CNT_BLOCKS_PER_GROUP 8
#define NR_CNT_PER_BLOCK 64
#define NR_BYTES_PER_CNT 4
#define NR_BYTES_PER_HDR 16
#define PRFCNT_EN_MASK_OFFSET 0x8

/*****************************************************************************/
enum {
	SHADER_HWCNT_BM,
	TILER_HWCNT_BM,
	MMU_L2_HWCNT_BM,
	JM_HWCNT_BM
};

/**
 * struct kbase_vinstr_context - vinstr context per device
 * @lock: protects the entire vinstr context
 * @kbdev: pointer to kbase device
 * @kctx: pointer to kbase context
 * @vmap: vinstr vmap for mapping hwcnt dump buffer
 * @gpu_va: GPU hwcnt dump buffer address
 * @cpu_va: the CPU side mapping of the hwcnt dump buffer
 * @dump_size: size of the dump buffer in bytes
 * @bitmap: current set of counters monitored, not always in sync
 *          with hardware
 * @reprogram: when true, reprogram hwcnt block with the new set of
 *             counters
 * @suspended: when true, the context has been suspended
 * @nclients: number of attached clients, pending or otherwise
 * @waiting_clients: head of list of clients being periodically sampled
 * @idle_clients: head of list of clients being idle
 * @suspended_clients: head of list of clients being suspended
 * @thread: periodic sampling thread
 * @waitq: notification queue of sampling thread
 * @request_pending: request for action for sampling thread
 */
struct kbase_vinstr_context {
	struct mutex lock;
	struct kbase_device *kbdev;
	struct kbase_context *kctx;
	struct kbase_vmap_struct vmap;
	u64 gpu_va;
	void *cpu_va;
	size_t dump_size;
	u32 bitmap[4];
	bool reprogram;
	bool suspended;
	u32 nclients;
	struct list_head waiting_clients;
	struct list_head idle_clients;
	struct list_head suspended_clients;
	struct task_struct *thread;
	wait_queue_head_t waitq;
	atomic_t request_pending;
};
/**
 * struct kbase_vinstr_client - a vinstr client attached to a vinstr context
 * @vinstr_ctx: vinstr context client is attached to
 * @list: node used to attach this client to list in vinstr context
 * @buffer_count: number of buffers this client is using
 * @event_mask: events this client reacts to
 * @dump_size: size of one dump buffer in bytes
 * @bitmap: bitmap request for JM, TILER, SHADER and MMU counters
 * @legacy_buffer: userspace hwcnt dump buffer (legacy interface)
 * @kernel_buffer: kernel hwcnt dump buffer (kernel client interface)
 * @accum_buffer: temporary accumulation buffer for preserving counters
 * @dump_time: next time this client shall request hwcnt dump
 * @dump_interval: interval between periodic hwcnt dumps
 * @dump_buffers: kernel hwcnt dump buffers allocated by this client
 * @dump_buffers_meta: metadata of dump buffers
 * @meta_idx: index of metadata being accessed by userspace
 * @read_idx: index of buffer read by userspace
 * @write_idx: index of buffer being written by dumping service
 * @waitq: client's notification queue
 * @pending: when true, client has attached but hwcnt not yet updated
 */
struct kbase_vinstr_client {
	struct kbase_vinstr_context *vinstr_ctx;
	struct list_head list;
	unsigned int buffer_count;
	u32 event_mask;
	size_t dump_size;
	u32 bitmap[4];
	void __user *legacy_buffer;
	void *kernel_buffer;
	void *accum_buffer;
	u64 dump_time;
	u32 dump_interval;
	char *dump_buffers;
	struct kbase_hwcnt_reader_metadata *dump_buffers_meta;
	atomic_t meta_idx;
	atomic_t read_idx;
	atomic_t write_idx;
	wait_queue_head_t waitq;
	bool pending;
};
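/*
 * Note on the client dump-buffer ring: write_idx, meta_idx and read_idx are
 * free-running counters and the buffer slot in use is always
 * (index % buffer_count). The dumping service advances write_idx after
 * filling a buffer, userspace advances meta_idx when it takes a buffer's
 * metadata (GET_BUFFER ioctl) and read_idx when it puts the buffer back
 * (PUT_BUFFER ioctl), so read_idx <= meta_idx <= write_idx always holds and
 * the ring is full when write_idx - read_idx == buffer_count.
 */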
/**
 * struct kbasep_vinstr_wake_up_timer - vinstr service thread wake up timer
 * @hrtimer: high resolution timer
 * @vinstr_ctx: vinstr context
 */
struct kbasep_vinstr_wake_up_timer {
	struct hrtimer hrtimer;
	struct kbase_vinstr_context *vinstr_ctx;
};

/*****************************************************************************/

static int kbasep_vinstr_service_task(void *data);

static unsigned int kbasep_vinstr_hwcnt_reader_poll(
		struct file *filp,
		poll_table *wait);
static long kbasep_vinstr_hwcnt_reader_ioctl(
		struct file *filp,
		unsigned int cmd,
		unsigned long arg);
static int kbasep_vinstr_hwcnt_reader_mmap(
		struct file *filp,
		struct vm_area_struct *vma);
static int kbasep_vinstr_hwcnt_reader_release(
		struct inode *inode,
		struct file *filp);

/* The hwcnt reader file operations structure. */
static const struct file_operations vinstr_client_fops = {
	.poll = kbasep_vinstr_hwcnt_reader_poll,
	.unlocked_ioctl = kbasep_vinstr_hwcnt_reader_ioctl,
	.compat_ioctl = kbasep_vinstr_hwcnt_reader_ioctl,
	.mmap = kbasep_vinstr_hwcnt_reader_mmap,
	.release = kbasep_vinstr_hwcnt_reader_release,
};
/*****************************************************************************/

static int enable_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
{
	struct kbase_uk_hwcnt_setup setup;

	setup.dump_buffer = vinstr_ctx->gpu_va;
	setup.jm_bm = vinstr_ctx->bitmap[JM_HWCNT_BM];
	setup.tiler_bm = vinstr_ctx->bitmap[TILER_HWCNT_BM];
	setup.shader_bm = vinstr_ctx->bitmap[SHADER_HWCNT_BM];
	setup.mmu_l2_bm = vinstr_ctx->bitmap[MMU_L2_HWCNT_BM];

	return kbase_instr_hwcnt_enable(vinstr_ctx->kctx, &setup);
}

static void disable_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
{
	kbase_instr_hwcnt_disable(vinstr_ctx->kctx);
}

static int reprogram_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
{
	disable_hwcnt(vinstr_ctx);
	return enable_hwcnt(vinstr_ctx);
}

static void hwcnt_bitmap_set(u32 dst[4], u32 src[4])
{
	dst[JM_HWCNT_BM] = src[JM_HWCNT_BM];
	dst[TILER_HWCNT_BM] = src[TILER_HWCNT_BM];
	dst[SHADER_HWCNT_BM] = src[SHADER_HWCNT_BM];
	dst[MMU_L2_HWCNT_BM] = src[MMU_L2_HWCNT_BM];
}

static void hwcnt_bitmap_union(u32 dst[4], u32 src[4])
{
	dst[JM_HWCNT_BM] |= src[JM_HWCNT_BM];
	dst[TILER_HWCNT_BM] |= src[TILER_HWCNT_BM];
	dst[SHADER_HWCNT_BM] |= src[SHADER_HWCNT_BM];
	dst[MMU_L2_HWCNT_BM] |= src[MMU_L2_HWCNT_BM];
}
size_t kbase_vinstr_dump_size(struct kbase_device *kbdev)
{
	size_t dump_size;

	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_V4)) {
		u32 nr_cg;

		nr_cg = kbdev->gpu_props.num_core_groups;
		dump_size = nr_cg * NR_CNT_BLOCKS_PER_GROUP *
				NR_CNT_PER_BLOCK *
				NR_BYTES_PER_CNT;
	} else {
		/* assume v5 for now */
		base_gpu_props *props = &kbdev->gpu_props.props;
		u32 nr_l2 = props->l2_props.num_l2_slices;
		u64 core_mask = props->coherency_info.group[0].core_mask;
		u32 nr_blocks = fls64(core_mask);

		/* JM and tiler counter blocks are always present */
		dump_size = (2 + nr_l2 + nr_blocks) *
				NR_CNT_PER_BLOCK *
				NR_BYTES_PER_CNT;
	}
	return dump_size;
}
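/*
 * Worked example of the v5 sizing above (hypothetical configuration): with
 * 2 L2 slices and a core_mask of 0xf (4 shader cores, so fls64() == 4) the
 * dump holds 2 + 2 + 4 = 8 blocks of 64 counters * 4 bytes each, i.e.
 * 8 * 256 = 2048 bytes.
 */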
static size_t kbasep_vinstr_dump_size_ctx(
		struct kbase_vinstr_context *vinstr_ctx)
{
	return kbase_vinstr_dump_size(vinstr_ctx->kctx->kbdev);
}

static int kbasep_vinstr_map_kernel_dump_buffer(
		struct kbase_vinstr_context *vinstr_ctx)
{
	struct kbase_va_region *reg;
	struct kbase_context *kctx = vinstr_ctx->kctx;
	u64 flags, nr_pages;
	u16 va_align = 0;

	flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_WR;
	vinstr_ctx->dump_size = kbasep_vinstr_dump_size_ctx(vinstr_ctx);
	nr_pages = PFN_UP(vinstr_ctx->dump_size);

	reg = kbase_mem_alloc(kctx, nr_pages, nr_pages, 0, &flags,
			&vinstr_ctx->gpu_va, &va_align);
	if (!reg)
		return -ENOMEM;

	vinstr_ctx->cpu_va = kbase_vmap(
			kctx,
			vinstr_ctx->gpu_va,
			vinstr_ctx->dump_size,
			&vinstr_ctx->vmap);
	if (!vinstr_ctx->cpu_va) {
		kbase_mem_free(kctx, vinstr_ctx->gpu_va);
		return -ENOMEM;
	}

	return 0;
}

static void kbasep_vinstr_unmap_kernel_dump_buffer(
		struct kbase_vinstr_context *vinstr_ctx)
{
	struct kbase_context *kctx = vinstr_ctx->kctx;

	kbase_vunmap(kctx, &vinstr_ctx->vmap);
	kbase_mem_free(kctx, vinstr_ctx->gpu_va);
}
/**
 * kbasep_vinstr_create_kctx - create kernel context for vinstr
 * @vinstr_ctx: vinstr context
 * Return: zero on success
 */
static int kbasep_vinstr_create_kctx(struct kbase_vinstr_context *vinstr_ctx)
{
	struct kbase_device *kbdev = vinstr_ctx->kbdev;
	struct kbasep_kctx_list_element *element;
	int err;

	vinstr_ctx->kctx = kbase_create_context(vinstr_ctx->kbdev, true);
	if (!vinstr_ctx->kctx)
		return -ENOMEM;

	/* Map the master kernel dump buffer. The HW dumps the counters
	 * into this memory region. */
	err = kbasep_vinstr_map_kernel_dump_buffer(vinstr_ctx);
	if (err) {
		kbase_destroy_context(vinstr_ctx->kctx);
		vinstr_ctx->kctx = NULL;
		return err;
	}

	/* Add kernel context to list of contexts associated with device. */
	element = kzalloc(sizeof(*element), GFP_KERNEL);
	if (element) {
		element->kctx = vinstr_ctx->kctx;
		mutex_lock(&kbdev->kctx_list_lock);
		list_add(&element->link, &kbdev->kctx_list);

		/* Inform timeline client about new context.
		 * Do this while holding the lock to avoid tracepoint
		 * being created in both body and summary stream. */
		kbase_tlstream_tl_new_ctx(
				vinstr_ctx->kctx,
				(u32)(vinstr_ctx->kctx->id),
				(u32)(vinstr_ctx->kctx->tgid));

		mutex_unlock(&kbdev->kctx_list_lock);
	} else {
		/* Don't treat this as a fail - just warn about it. */
		dev_warn(kbdev->dev,
				"couldn't add kctx to kctx_list\n");
	}

	err = enable_hwcnt(vinstr_ctx);
	if (err) {
		kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
		kbase_destroy_context(vinstr_ctx->kctx);
		if (element) {
			mutex_lock(&kbdev->kctx_list_lock);
			list_del(&element->link);
			kfree(element);
			mutex_unlock(&kbdev->kctx_list_lock);
		}
		kbase_tlstream_tl_del_ctx(vinstr_ctx->kctx);
		vinstr_ctx->kctx = NULL;
		return err;
	}

	vinstr_ctx->thread = kthread_run(
			kbasep_vinstr_service_task,
			vinstr_ctx,
			"mali_vinstr_service");
	if (IS_ERR(vinstr_ctx->thread)) {
		/* kthread_run() reports failure as an ERR_PTR, not NULL. */
		disable_hwcnt(vinstr_ctx);
		kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
		kbase_destroy_context(vinstr_ctx->kctx);
		if (element) {
			mutex_lock(&kbdev->kctx_list_lock);
			list_del(&element->link);
			kfree(element);
			mutex_unlock(&kbdev->kctx_list_lock);
		}
		kbase_tlstream_tl_del_ctx(vinstr_ctx->kctx);
		vinstr_ctx->kctx = NULL;
		vinstr_ctx->thread = NULL;
		return -EFAULT;
	}

	return 0;
}
/**
 * kbasep_vinstr_destroy_kctx - destroy vinstr's kernel context
 * @vinstr_ctx: vinstr context
 */
static void kbasep_vinstr_destroy_kctx(struct kbase_vinstr_context *vinstr_ctx)
{
	struct kbase_device *kbdev = vinstr_ctx->kbdev;
	struct kbasep_kctx_list_element *element;
	struct kbasep_kctx_list_element *tmp;
	bool found = false;

	/* Release hw counters dumping resources. */
	vinstr_ctx->thread = NULL;
	disable_hwcnt(vinstr_ctx);
	kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
	kbase_destroy_context(vinstr_ctx->kctx);

	/* Remove kernel context from the device's contexts list. */
	mutex_lock(&kbdev->kctx_list_lock);
	list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
		if (element->kctx == vinstr_ctx->kctx) {
			list_del(&element->link);
			kfree(element);
			found = true;
		}
	}
	mutex_unlock(&kbdev->kctx_list_lock);

	if (!found)
		dev_warn(kbdev->dev, "kctx not in kctx_list\n");

	/* Inform timeline client about context destruction. */
	kbase_tlstream_tl_del_ctx(vinstr_ctx->kctx);

	vinstr_ctx->kctx = NULL;
}
/**
 * kbasep_vinstr_attach_client - Attach a client to the vinstr core
 * @vinstr_ctx: vinstr context
 * @buffer_count: requested number of dump buffers
 * @bitmap: bitmaps describing which counters should be enabled
 * @argp: pointer where notification descriptor shall be stored
 * @kernel_buffer: pointer to kernel side buffer
 *
 * Return: vinstr opaque client handle or NULL on failure
 */
static struct kbase_vinstr_client *kbasep_vinstr_attach_client(
		struct kbase_vinstr_context *vinstr_ctx, u32 buffer_count,
		u32 bitmap[4], void *argp, void *kernel_buffer)
{
	struct task_struct *thread = NULL;
	struct kbase_vinstr_client *cli;

	KBASE_DEBUG_ASSERT(vinstr_ctx);

	if (buffer_count > MAX_BUFFER_COUNT
	    || (buffer_count & (buffer_count - 1)))
		return NULL;

	cli = kzalloc(sizeof(*cli), GFP_KERNEL);
	if (!cli)
		return NULL;

	cli->vinstr_ctx = vinstr_ctx;
	cli->buffer_count = buffer_count;
	cli->event_mask =
		(1 << BASE_HWCNT_READER_EVENT_MANUAL) |
		(1 << BASE_HWCNT_READER_EVENT_PERIODIC);
	cli->pending = true;

	hwcnt_bitmap_set(cli->bitmap, bitmap);

	mutex_lock(&vinstr_ctx->lock);

	hwcnt_bitmap_union(vinstr_ctx->bitmap, cli->bitmap);
	vinstr_ctx->reprogram = true;

	/* If this is the first client, create the vinstr kbase
	 * context. This context is permanently resident until the
	 * last client exits. */
	if (!vinstr_ctx->nclients) {
		hwcnt_bitmap_set(vinstr_ctx->bitmap, cli->bitmap);
		if (kbasep_vinstr_create_kctx(vinstr_ctx) < 0)
			goto error;

		vinstr_ctx->reprogram = false;
		cli->pending = false;
	}

	/* The GPU resets the counter block every time there is a request
	 * to dump it. We need a per client kernel buffer for accumulating
	 * the counters. */
	cli->dump_size = kbasep_vinstr_dump_size_ctx(vinstr_ctx);
	cli->accum_buffer = kzalloc(cli->dump_size, GFP_KERNEL);
	if (!cli->accum_buffer)
		goto error;

	/* Prepare buffers. */
	if (cli->buffer_count) {
		int *fd = (int *)argp;
		size_t tmp;

		/* Allocate area for buffers metadata storage. */
		tmp = sizeof(struct kbase_hwcnt_reader_metadata) *
			cli->buffer_count;
		cli->dump_buffers_meta = kmalloc(tmp, GFP_KERNEL);
		if (!cli->dump_buffers_meta)
			goto error;

		/* Allocate required number of dumping buffers. */
		cli->dump_buffers = (char *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(cli->dump_size * cli->buffer_count));
		if (!cli->dump_buffers)
			goto error;

		/* Create descriptor for user-kernel data exchange. */
		*fd = anon_inode_getfd(
				"[mali_vinstr_desc]",
				&vinstr_client_fops,
				cli,
				O_RDONLY | O_CLOEXEC);
		if (*fd < 0)
			goto error;
	} else if (kernel_buffer) {
		cli->kernel_buffer = kernel_buffer;
	} else {
		cli->legacy_buffer = (void __user *)argp;
	}

	atomic_set(&cli->read_idx, 0);
	atomic_set(&cli->meta_idx, 0);
	atomic_set(&cli->write_idx, 0);
	init_waitqueue_head(&cli->waitq);

	vinstr_ctx->nclients++;
	list_add(&cli->list, &vinstr_ctx->idle_clients);

	mutex_unlock(&vinstr_ctx->lock);

	return cli;

error:
	kfree(cli->dump_buffers_meta);
	if (cli->dump_buffers)
		free_pages(
				(unsigned long)cli->dump_buffers,
				get_order(cli->dump_size * cli->buffer_count));
	kfree(cli->accum_buffer);
	if (!vinstr_ctx->nclients && vinstr_ctx->kctx) {
		thread = vinstr_ctx->thread;
		kbasep_vinstr_destroy_kctx(vinstr_ctx);
	}
	kfree(cli);
	mutex_unlock(&vinstr_ctx->lock);

	/* Thread must be stopped after lock is released. */
	if (thread)
		kthread_stop(thread);

	return NULL;
}
void kbase_vinstr_detach_client(struct kbase_vinstr_client *cli)
{
	struct kbase_vinstr_context *vinstr_ctx;
	struct kbase_vinstr_client *iter, *tmp;
	struct task_struct *thread = NULL;
	u32 zerobitmap[4] = { 0 };
	int cli_found = 0;

	KBASE_DEBUG_ASSERT(cli);
	vinstr_ctx = cli->vinstr_ctx;
	KBASE_DEBUG_ASSERT(vinstr_ctx);

	mutex_lock(&vinstr_ctx->lock);

	list_for_each_entry_safe(iter, tmp, &vinstr_ctx->idle_clients, list) {
		if (iter == cli) {
			vinstr_ctx->reprogram = true;
			cli_found = 1;
			list_del(&iter->list);
			break;
		}
	}
	if (!cli_found) {
		list_for_each_entry_safe(
				iter, tmp, &vinstr_ctx->waiting_clients, list) {
			if (iter == cli) {
				vinstr_ctx->reprogram = true;
				cli_found = 1;
				list_del(&iter->list);
				break;
			}
		}
	}
	KBASE_DEBUG_ASSERT(cli_found);

	kfree(cli->dump_buffers_meta);
	free_pages(
			(unsigned long)cli->dump_buffers,
			get_order(cli->dump_size * cli->buffer_count));
	kfree(cli->accum_buffer);
	kfree(cli);

	vinstr_ctx->nclients--;
	if (!vinstr_ctx->nclients) {
		thread = vinstr_ctx->thread;
		kbasep_vinstr_destroy_kctx(vinstr_ctx);
	}

	/* Rebuild context bitmap now that the client has detached */
	hwcnt_bitmap_set(vinstr_ctx->bitmap, zerobitmap);
	list_for_each_entry(iter, &vinstr_ctx->idle_clients, list)
		hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap);
	list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list)
		hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap);

	mutex_unlock(&vinstr_ctx->lock);

	/* Thread must be stopped after lock is released. */
	if (thread)
		kthread_stop(thread);
}
/* Accumulate counters in the dump buffer */
static void accum_dump_buffer(void *dst, void *src, size_t dump_size)
{
	size_t block_size = NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT;
	u32 *d = dst;
	u32 *s = src;
	size_t i, j;

	for (i = 0; i < dump_size; i += block_size) {
		/* skip over the header block */
		d += NR_BYTES_PER_HDR / sizeof(u32);
		s += NR_BYTES_PER_HDR / sizeof(u32);
		for (j = 0; j < (block_size - NR_BYTES_PER_HDR) / sizeof(u32); j++) {
			/* saturate result if addition would result in wraparound */
			if (U32_MAX - *d < *s)
				*d = U32_MAX;
			else
				*d += *s;
			d++;
			s++;
		}
	}
}
/* This is the Midgard v4 patch function. It copies the headers for each
 * of the defined blocks from the master kernel buffer and then patches up
 * the performance counter enable mask for each of the blocks to exclude
 * counters that were not requested by the client. */
static void patch_dump_buffer_hdr_v4(
		struct kbase_vinstr_context *vinstr_ctx,
		struct kbase_vinstr_client *cli)
{
	u32 *mask;
	u8 *dst = cli->accum_buffer;
	u8 *src = vinstr_ctx->cpu_va;
	u32 nr_cg = vinstr_ctx->kctx->kbdev->gpu_props.num_core_groups;
	size_t i, group_size, group;
	enum {
		SC0_BASE    = 0 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
		SC1_BASE    = 1 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
		SC2_BASE    = 2 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
		SC3_BASE    = 3 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
		TILER_BASE  = 4 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
		MMU_L2_BASE = 5 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
		JM_BASE     = 7 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT
	};

	group_size = NR_CNT_BLOCKS_PER_GROUP *
			NR_CNT_PER_BLOCK *
			NR_BYTES_PER_CNT;
	for (i = 0; i < nr_cg; i++) {
		group = i * group_size;
		/* copy shader core headers */
		memcpy(&dst[group + SC0_BASE], &src[group + SC0_BASE],
		       NR_BYTES_PER_HDR);
		memcpy(&dst[group + SC1_BASE], &src[group + SC1_BASE],
		       NR_BYTES_PER_HDR);
		memcpy(&dst[group + SC2_BASE], &src[group + SC2_BASE],
		       NR_BYTES_PER_HDR);
		memcpy(&dst[group + SC3_BASE], &src[group + SC3_BASE],
		       NR_BYTES_PER_HDR);

		/* copy tiler header */
		memcpy(&dst[group + TILER_BASE], &src[group + TILER_BASE],
		       NR_BYTES_PER_HDR);

		/* copy mmu header */
		memcpy(&dst[group + MMU_L2_BASE], &src[group + MMU_L2_BASE],
		       NR_BYTES_PER_HDR);

		/* copy job manager header */
		memcpy(&dst[group + JM_BASE], &src[group + JM_BASE],
		       NR_BYTES_PER_HDR);

		/* patch the shader core enable mask */
		mask = (u32 *)&dst[group + SC0_BASE + PRFCNT_EN_MASK_OFFSET];
		*mask &= cli->bitmap[SHADER_HWCNT_BM];
		mask = (u32 *)&dst[group + SC1_BASE + PRFCNT_EN_MASK_OFFSET];
		*mask &= cli->bitmap[SHADER_HWCNT_BM];
		mask = (u32 *)&dst[group + SC2_BASE + PRFCNT_EN_MASK_OFFSET];
		*mask &= cli->bitmap[SHADER_HWCNT_BM];
		mask = (u32 *)&dst[group + SC3_BASE + PRFCNT_EN_MASK_OFFSET];
		*mask &= cli->bitmap[SHADER_HWCNT_BM];

		/* patch the tiler core enable mask */
		mask = (u32 *)&dst[group + TILER_BASE + PRFCNT_EN_MASK_OFFSET];
		*mask &= cli->bitmap[TILER_HWCNT_BM];

		/* patch the mmu core enable mask */
		mask = (u32 *)&dst[group + MMU_L2_BASE + PRFCNT_EN_MASK_OFFSET];
		*mask &= cli->bitmap[MMU_L2_HWCNT_BM];

		/* patch the job manager enable mask */
		mask = (u32 *)&dst[group + JM_BASE + PRFCNT_EN_MASK_OFFSET];
		*mask &= cli->bitmap[JM_HWCNT_BM];
	}
}
/* This is the Midgard v5 patch function. It copies the headers for each
 * of the defined blocks from the master kernel buffer and then patches up
 * the performance counter enable mask for each of the blocks to exclude
 * counters that were not requested by the client. */
static void patch_dump_buffer_hdr_v5(
		struct kbase_vinstr_context *vinstr_ctx,
		struct kbase_vinstr_client *cli)
{
	struct kbase_device *kbdev = vinstr_ctx->kctx->kbdev;
	u32 i, nr_l2;
	u64 core_mask;
	u32 *mask;
	u8 *dst = cli->accum_buffer;
	u8 *src = vinstr_ctx->cpu_va;
	size_t block_size = NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT;

	/* copy and patch job manager header */
	memcpy(dst, src, NR_BYTES_PER_HDR);
	mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
	*mask &= cli->bitmap[JM_HWCNT_BM];
	dst += block_size;
	src += block_size;

	/* copy and patch tiler header */
	memcpy(dst, src, NR_BYTES_PER_HDR);
	mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
	*mask &= cli->bitmap[TILER_HWCNT_BM];
	dst += block_size;
	src += block_size;

	/* copy and patch MMU/L2C headers */
	nr_l2 = kbdev->gpu_props.props.l2_props.num_l2_slices;
	for (i = 0; i < nr_l2; i++) {
		memcpy(dst, src, NR_BYTES_PER_HDR);
		mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
		*mask &= cli->bitmap[MMU_L2_HWCNT_BM];
		dst += block_size;
		src += block_size;
	}

	/* copy and patch shader core headers */
	core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
	while (core_mask != 0ull) {
		memcpy(dst, src, NR_BYTES_PER_HDR);
		if (0ull != (core_mask & 1ull)) {
			/* if block is not reserved update header */
			mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
			*mask &= cli->bitmap[SHADER_HWCNT_BM];
		}
		dst += block_size;
		src += block_size;

		core_mask >>= 1;
	}
}
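/*
 * The two patch functions above assume the dump buffer layout used elsewhere
 * in this file: for v5, one 256-byte block (64 counters * 4 bytes) per JM,
 * tiler, each L2 slice and each bit position of the shader core_mask, in that
 * order, with the enable mask at byte offset 0x8 of each block's 16-byte
 * header; for v4, 8 such blocks per core group as laid out by the
 * SCn_BASE/TILER_BASE/MMU_L2_BASE/JM_BASE offsets.
 */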
/**
 * accum_clients - accumulate dumped hw counters for all known clients
 * @vinstr_ctx: vinstr context
 */
static void accum_clients(struct kbase_vinstr_context *vinstr_ctx)
{
	struct kbase_vinstr_client *iter;
	int v4 = 0;

	v4 = kbase_hw_has_feature(vinstr_ctx->kbdev, BASE_HW_FEATURE_V4);
	list_for_each_entry(iter, &vinstr_ctx->idle_clients, list) {
		/* Don't bother accumulating clients whose hwcnt requests
		 * have not yet been honoured. */
		if (iter->pending)
			continue;
		if (v4)
			patch_dump_buffer_hdr_v4(vinstr_ctx, iter);
		else
			patch_dump_buffer_hdr_v5(vinstr_ctx, iter);
		accum_dump_buffer(
				iter->accum_buffer,
				vinstr_ctx->cpu_va,
				iter->dump_size);
	}
	list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list) {
		/* Don't bother accumulating clients whose hwcnt requests
		 * have not yet been honoured. */
		if (iter->pending)
			continue;
		if (v4)
			patch_dump_buffer_hdr_v4(vinstr_ctx, iter);
		else
			patch_dump_buffer_hdr_v5(vinstr_ctx, iter);
		accum_dump_buffer(
				iter->accum_buffer,
				vinstr_ctx->cpu_va,
				iter->dump_size);
	}
}
/*****************************************************************************/

/**
 * kbasep_vinstr_get_timestamp - return timestamp
 *
 * Function returns timestamp value based on raw monotonic timer. Value will
 * wrap around zero in case of overflow.
 *
 * Return: timestamp value
 */
static u64 kbasep_vinstr_get_timestamp(void)
{
	struct timespec ts;

	getrawmonotonic(&ts);
	return (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
}

/**
 * kbasep_vinstr_add_dump_request - register client's dumping request
 * @cli: requesting client
 * @waiting_clients: list of pending dumping requests
 */
static void kbasep_vinstr_add_dump_request(
		struct kbase_vinstr_client *cli,
		struct list_head *waiting_clients)
{
	struct kbase_vinstr_client *tmp;

	if (list_empty(waiting_clients)) {
		list_add(&cli->list, waiting_clients);
		return;
	}
	list_for_each_entry(tmp, waiting_clients, list) {
		if (tmp->dump_time > cli->dump_time) {
			list_add_tail(&cli->list, &tmp->list);
			return;
		}
	}
	list_add_tail(&cli->list, waiting_clients);
}
/**
 * kbasep_vinstr_collect_and_accumulate - collect hw counters via low level
 *                                        dump and accumulate them for known
 *                                        clients
 * @vinstr_ctx: vinstr context
 * @timestamp: pointer where collection timestamp will be recorded
 *
 * Return: zero on success
 */
static int kbasep_vinstr_collect_and_accumulate(
		struct kbase_vinstr_context *vinstr_ctx, u64 *timestamp)
{
	int rcode;

	/* Request HW counters dump.
	 * Disable preemption to make dump timestamp more accurate. */
	preempt_disable();
	*timestamp = kbasep_vinstr_get_timestamp();
	rcode = kbase_instr_hwcnt_request_dump(vinstr_ctx->kctx);
	preempt_enable();

	if (!rcode)
		rcode = kbase_instr_hwcnt_wait_for_dump(vinstr_ctx->kctx);
	WARN_ON(rcode);

	/* Accumulate values of collected counters. */
	if (!rcode)
		accum_clients(vinstr_ctx);

	return rcode;
}
/**
 * kbasep_vinstr_fill_dump_buffer - copy accumulated counters to empty kernel
 *                                  buffer
 * @cli: requesting client
 * @timestamp: timestamp when counters were collected
 * @event_id: id of event that triggered the counters collection
 *
 * Return: zero on success
 */
static int kbasep_vinstr_fill_dump_buffer(
		struct kbase_vinstr_client *cli, u64 timestamp,
		enum base_hwcnt_reader_event event_id)
{
	unsigned int write_idx = atomic_read(&cli->write_idx);
	unsigned int read_idx = atomic_read(&cli->read_idx);

	struct kbase_hwcnt_reader_metadata *meta;
	void *buffer;

	/* Check if there is a place to copy HWC block into. */
	if (write_idx - read_idx == cli->buffer_count)
		return -1;

	write_idx %= cli->buffer_count;

	/* Fill in dump buffer and its metadata. */
	buffer = &cli->dump_buffers[write_idx * cli->dump_size];
	meta = &cli->dump_buffers_meta[write_idx];
	meta->timestamp = timestamp;
	meta->event_id = event_id;
	meta->buffer_idx = write_idx;
	memcpy(buffer, cli->accum_buffer, cli->dump_size);
	return 0;
}
/**
 * kbasep_vinstr_fill_dump_buffer_legacy - copy accumulated counters to buffer
 *                                         allocated in userspace
 * @cli: requesting client
 *
 * Return: zero on success
 *
 * This is part of legacy ioctl interface.
 */
static int kbasep_vinstr_fill_dump_buffer_legacy(
		struct kbase_vinstr_client *cli)
{
	void __user *buffer = cli->legacy_buffer;
	int rcode;

	/* Copy data to user buffer. */
	rcode = copy_to_user(buffer, cli->accum_buffer, cli->dump_size);
	if (rcode)
		pr_warn("error while copying buffer to user\n");
	return rcode;
}

/**
 * kbasep_vinstr_fill_dump_buffer_kernel - copy accumulated counters to buffer
 *                                         allocated in kernel space
 * @cli: requesting client
 *
 * Return: zero on success
 *
 * This is part of the kernel client interface.
 */
static int kbasep_vinstr_fill_dump_buffer_kernel(
		struct kbase_vinstr_client *cli)
{
	memcpy(cli->kernel_buffer, cli->accum_buffer, cli->dump_size);
	return 0;
}
/**
 * kbasep_vinstr_reprogram - reprogram the hwcnt set collected by vinstr
 * @vinstr_ctx: vinstr context
 */
static void kbasep_vinstr_reprogram(
		struct kbase_vinstr_context *vinstr_ctx)
{
	if (vinstr_ctx->reprogram) {
		struct kbase_vinstr_client *iter;

		if (!reprogram_hwcnt(vinstr_ctx)) {
			vinstr_ctx->reprogram = false;
			list_for_each_entry(
					iter,
					&vinstr_ctx->idle_clients,
					list)
				iter->pending = false;
			list_for_each_entry(
					iter,
					&vinstr_ctx->waiting_clients,
					list)
				iter->pending = false;
		}
	}
}
/**
 * kbasep_vinstr_update_client - copy accumulated counters to user readable
 *                               buffer and notify the user
 * @cli: requesting client
 * @timestamp: timestamp when counters were collected
 * @event_id: id of event that triggered the counters collection
 *
 * Return: zero on success
 */
static int kbasep_vinstr_update_client(
		struct kbase_vinstr_client *cli, u64 timestamp,
		enum base_hwcnt_reader_event event_id)
{
	int rcode = 0;

	/* Copy collected counters to user readable buffer. */
	if (cli->buffer_count)
		rcode = kbasep_vinstr_fill_dump_buffer(
				cli, timestamp, event_id);
	else if (cli->kernel_buffer)
		rcode = kbasep_vinstr_fill_dump_buffer_kernel(cli);
	else
		rcode = kbasep_vinstr_fill_dump_buffer_legacy(cli);

	if (rcode)
		goto exit;

	/* Notify client. Make sure all changes to memory are visible. */
	wmb();
	atomic_inc(&cli->write_idx);
	wake_up_interruptible(&cli->waitq);

	/* Prepare for next request. */
	memset(cli->accum_buffer, 0, cli->dump_size);

exit:
	return rcode;
}
/**
 * kbasep_vinstr_wake_up_callback - vinstr wake up timer wake up function
 *
 * @hrtimer: high resolution timer
 *
 * Return: High resolution timer restart enum.
 */
static enum hrtimer_restart kbasep_vinstr_wake_up_callback(
		struct hrtimer *hrtimer)
{
	struct kbasep_vinstr_wake_up_timer *timer =
		container_of(
			hrtimer,
			struct kbasep_vinstr_wake_up_timer,
			hrtimer);

	KBASE_DEBUG_ASSERT(timer);

	atomic_set(&timer->vinstr_ctx->request_pending, 1);
	wake_up_all(&timer->vinstr_ctx->waitq);

	return HRTIMER_NORESTART;
}

/**
 * kbasep_vinstr_service_task - HWC dumping service thread
 *
 * @data: Pointer to vinstr context structure.
 *
 * Return: Always returns zero.
 */
static int kbasep_vinstr_service_task(void *data)
{
	struct kbase_vinstr_context *vinstr_ctx = data;
	struct kbasep_vinstr_wake_up_timer timer;

	KBASE_DEBUG_ASSERT(vinstr_ctx);

	hrtimer_init(&timer.hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer.hrtimer.function = kbasep_vinstr_wake_up_callback;
	timer.vinstr_ctx = vinstr_ctx;

	while (!kthread_should_stop()) {
		struct kbase_vinstr_client *cli = NULL;
		struct kbase_vinstr_client *tmp;
		u64 timestamp = kbasep_vinstr_get_timestamp();
		u64 dump_time = 0;
		struct list_head expired_requests;

		/* Hold lock while performing operations on lists of clients. */
		mutex_lock(&vinstr_ctx->lock);

		/* Closing thread must not interact with client requests. */
		if (current == vinstr_ctx->thread) {
			atomic_set(&vinstr_ctx->request_pending, 0);

			if (!list_empty(&vinstr_ctx->waiting_clients)) {
				cli = list_first_entry(
						&vinstr_ctx->waiting_clients,
						struct kbase_vinstr_client,
						list);
				dump_time = cli->dump_time;
			}
		}

		if (!cli || ((s64)timestamp - (s64)dump_time < 0ll)) {
			mutex_unlock(&vinstr_ctx->lock);

			/* Sleep until next dumping event or service request. */
			if (cli) {
				u64 diff = dump_time - timestamp;

				hrtimer_start(
						&timer.hrtimer,
						ns_to_ktime(diff),
						HRTIMER_MODE_REL);
			}
			wait_event(
					vinstr_ctx->waitq,
					atomic_read(
						&vinstr_ctx->request_pending) ||
					kthread_should_stop());
			hrtimer_cancel(&timer.hrtimer);
			continue;
		}

		kbasep_vinstr_collect_and_accumulate(vinstr_ctx, &timestamp);

		INIT_LIST_HEAD(&expired_requests);

		/* Find all expired requests. */
		list_for_each_entry_safe(
				cli,
				tmp,
				&vinstr_ctx->waiting_clients,
				list) {
			s64 tdiff =
				(s64)(timestamp + DUMPING_RESOLUTION) -
				(s64)cli->dump_time;
			if (tdiff >= 0ll) {
				list_del(&cli->list);
				list_add(&cli->list, &expired_requests);
			} else {
				break;
			}
		}

		/* Fill data for each request found. */
		list_for_each_entry_safe(cli, tmp, &expired_requests, list) {
			/* Ensure that legacy buffer will not be used from
			 * this kthread context. */
			BUG_ON(cli->buffer_count == 0);
			/* Expect only periodically sampled clients. */
			BUG_ON(cli->dump_interval == 0);

			kbasep_vinstr_update_client(
					cli,
					timestamp,
					BASE_HWCNT_READER_EVENT_PERIODIC);

			/* Set new dumping time. Drop missed probing times. */
			do {
				cli->dump_time += cli->dump_interval;
			} while (cli->dump_time < timestamp);

			list_del(&cli->list);
			kbasep_vinstr_add_dump_request(
					cli,
					&vinstr_ctx->waiting_clients);
		}

		/* Reprogram counters set if required. */
		kbasep_vinstr_reprogram(vinstr_ctx);

		mutex_unlock(&vinstr_ctx->lock);
	}

	return 0;
}
/*****************************************************************************/

/**
 * kbasep_vinstr_hwcnt_reader_buffer_ready - check if client has ready buffers
 * @cli: pointer to vinstr client structure
 *
 * Return: non-zero if client has at least one dumping buffer filled that was
 *         not notified to user yet
 */
static int kbasep_vinstr_hwcnt_reader_buffer_ready(
		struct kbase_vinstr_client *cli)
{
	KBASE_DEBUG_ASSERT(cli);
	return atomic_read(&cli->write_idx) != atomic_read(&cli->meta_idx);
}
/**
 * kbasep_vinstr_hwcnt_reader_ioctl_get_buffer - hwcnt reader's ioctl command
 * @cli: pointer to vinstr client structure
 * @buffer: pointer to userspace buffer
 * @size: size of buffer
 *
 * Return: zero on success
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
		struct kbase_vinstr_client *cli, void __user *buffer,
		size_t size)
{
	unsigned int meta_idx = atomic_read(&cli->meta_idx);
	unsigned int idx = meta_idx % cli->buffer_count;

	struct kbase_hwcnt_reader_metadata *meta = &cli->dump_buffers_meta[idx];

	/* Metadata sanity check. */
	KBASE_DEBUG_ASSERT(idx == meta->buffer_idx);

	if (sizeof(struct kbase_hwcnt_reader_metadata) != size)
		return -EINVAL;

	/* Check if there is any buffer available. */
	if (atomic_read(&cli->write_idx) == meta_idx)
		return -EAGAIN;

	/* Check if previously taken buffer was put back. */
	if (atomic_read(&cli->read_idx) != meta_idx)
		return -EBUSY;

	/* Copy next available buffer's metadata to user. */
	if (copy_to_user(buffer, meta, size))
		return -EFAULT;

	atomic_inc(&cli->meta_idx);

	return 0;
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_put_buffer - hwcnt reader's ioctl command
 * @cli: pointer to vinstr client structure
 * @buffer: pointer to userspace buffer
 * @size: size of buffer
 *
 * Return: zero on success
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
		struct kbase_vinstr_client *cli, void __user *buffer,
		size_t size)
{
	unsigned int read_idx = atomic_read(&cli->read_idx);
	unsigned int idx = read_idx % cli->buffer_count;

	struct kbase_hwcnt_reader_metadata meta;

	if (sizeof(struct kbase_hwcnt_reader_metadata) != size)
		return -EINVAL;

	/* Check if any buffer was taken. */
	if (atomic_read(&cli->meta_idx) == read_idx)
		return -EPERM;

	/* Check if correct buffer is put back. */
	if (copy_from_user(&meta, buffer, size))
		return -EFAULT;
	if (idx != meta.buffer_idx)
		return -EINVAL;

	atomic_inc(&cli->read_idx);

	return 0;
}
/**
 * kbasep_vinstr_hwcnt_reader_ioctl_set_interval - hwcnt reader's ioctl command
 * @cli: pointer to vinstr client structure
 * @interval: periodic dumping interval (disable periodic dumping if zero)
 *
 * Return: zero on success
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
		struct kbase_vinstr_client *cli, u32 interval)
{
	struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;

	KBASE_DEBUG_ASSERT(vinstr_ctx);

	mutex_lock(&vinstr_ctx->lock);

	if (vinstr_ctx->suspended) {
		mutex_unlock(&vinstr_ctx->lock);
		return -EBUSY;
	}

	list_del(&cli->list);

	cli->dump_interval = interval;

	/* If interval is non-zero, enable periodic dumping for this client. */
	if (cli->dump_interval) {
		if (cli->dump_interval < DUMPING_RESOLUTION)
			cli->dump_interval = DUMPING_RESOLUTION;
		cli->dump_time =
			kbasep_vinstr_get_timestamp() + cli->dump_interval;

		kbasep_vinstr_add_dump_request(
				cli, &vinstr_ctx->waiting_clients);

		atomic_set(&vinstr_ctx->request_pending, 1);
		wake_up_all(&vinstr_ctx->waitq);
	} else {
		list_add(&cli->list, &vinstr_ctx->idle_clients);
	}

	mutex_unlock(&vinstr_ctx->lock);

	return 0;
}
/**
 * kbasep_vinstr_hwcnt_reader_event_mask - return event mask for event id
 * @event_id: id of event
 * Return: event_mask or zero if event is not supported or maskable
 */
static u32 kbasep_vinstr_hwcnt_reader_event_mask(
		enum base_hwcnt_reader_event event_id)
{
	u32 event_mask = 0;

	switch (event_id) {
	case BASE_HWCNT_READER_EVENT_PREJOB:
	case BASE_HWCNT_READER_EVENT_POSTJOB:
		/* These events are maskable. */
		event_mask = (1 << event_id);
		break;
	case BASE_HWCNT_READER_EVENT_MANUAL:
	case BASE_HWCNT_READER_EVENT_PERIODIC:
		/* These events are non-maskable. */
	default:
		/* These events are not supported. */
		break;
	}

	return event_mask;
}
/**
 * kbasep_vinstr_hwcnt_reader_ioctl_enable_event - hwcnt reader's ioctl command
 * @cli: pointer to vinstr client structure
 * @event_id: id of event to enable
 *
 * Return: zero on success
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
		struct kbase_vinstr_client *cli,
		enum base_hwcnt_reader_event event_id)
{
	struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;
	u32 event_mask;

	KBASE_DEBUG_ASSERT(vinstr_ctx);

	event_mask = kbasep_vinstr_hwcnt_reader_event_mask(event_id);
	if (!event_mask)
		return -EINVAL;

	mutex_lock(&vinstr_ctx->lock);
	cli->event_mask |= event_mask;
	mutex_unlock(&vinstr_ctx->lock);

	return 0;
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_disable_event - hwcnt reader's ioctl command
 * @cli: pointer to vinstr client structure
 * @event_id: id of event to disable
 *
 * Return: zero on success
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
		struct kbase_vinstr_client *cli,
		enum base_hwcnt_reader_event event_id)
{
	struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;
	u32 event_mask;

	KBASE_DEBUG_ASSERT(vinstr_ctx);

	event_mask = kbasep_vinstr_hwcnt_reader_event_mask(event_id);
	if (!event_mask)
		return -EINVAL;

	mutex_lock(&vinstr_ctx->lock);
	cli->event_mask &= ~event_mask;
	mutex_unlock(&vinstr_ctx->lock);

	return 0;
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_get_hwver - hwcnt reader's ioctl command
 * @cli: pointer to vinstr client structure
 * @hwver: pointer to user buffer where hw version will be stored
 *
 * Return: zero on success
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
		struct kbase_vinstr_client *cli, u32 __user *hwver)
{
	struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;

	u32 ver = 5;

	KBASE_DEBUG_ASSERT(vinstr_ctx);

	if (kbase_hw_has_feature(vinstr_ctx->kbdev, BASE_HW_FEATURE_V4))
		ver = 4;

	return put_user(ver, hwver);
}
/**
 * kbasep_vinstr_hwcnt_reader_ioctl - hwcnt reader's ioctl
 * @filp: pointer to file structure
 * @cmd: user command
 * @arg: command's argument
 *
 * Return: zero on success
 */
static long kbasep_vinstr_hwcnt_reader_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	long rcode = 0;
	struct kbase_vinstr_client *cli;

	KBASE_DEBUG_ASSERT(filp);

	cli = filp->private_data;
	KBASE_DEBUG_ASSERT(cli);

	if (unlikely(_IOC_TYPE(cmd) != KBASE_HWCNT_READER))
		return -EINVAL;

	switch (cmd) {
	case KBASE_HWCNT_READER_GET_API_VERSION:
		rcode = put_user(HWCNT_READER_API, (u32 __user *)arg);
		break;
	case KBASE_HWCNT_READER_GET_HWVER:
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
				cli, (u32 __user *)arg);
		break;
	case KBASE_HWCNT_READER_GET_BUFFER_SIZE:
		KBASE_DEBUG_ASSERT(cli->vinstr_ctx);
		rcode = put_user(
				(u32)cli->vinstr_ctx->dump_size,
				(u32 __user *)arg);
		break;
	case KBASE_HWCNT_READER_DUMP:
		rcode = kbase_vinstr_hwc_dump(
				cli, BASE_HWCNT_READER_EVENT_MANUAL);
		break;
	case KBASE_HWCNT_READER_CLEAR:
		rcode = kbase_vinstr_hwc_clear(cli);
		break;
	case KBASE_HWCNT_READER_GET_BUFFER:
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
				cli, (void __user *)arg, _IOC_SIZE(cmd));
		break;
	case KBASE_HWCNT_READER_PUT_BUFFER:
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
				cli, (void __user *)arg, _IOC_SIZE(cmd));
		break;
	case KBASE_HWCNT_READER_SET_INTERVAL:
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
				cli, (u32)arg);
		break;
	case KBASE_HWCNT_READER_ENABLE_EVENT:
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
				cli, (enum base_hwcnt_reader_event)arg);
		break;
	case KBASE_HWCNT_READER_DISABLE_EVENT:
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
				cli, (enum base_hwcnt_reader_event)arg);
		break;
	default:
		rcode = -EINVAL;
		break;
	}

	return rcode;
}
/**
 * kbasep_vinstr_hwcnt_reader_poll - hwcnt reader's poll
 * @filp: pointer to file structure
 * @wait: pointer to poll table
 *
 * Return: POLLIN if data can be read without blocking, otherwise zero
 */
static unsigned int kbasep_vinstr_hwcnt_reader_poll(struct file *filp,
		poll_table *wait)
{
	struct kbase_vinstr_client *cli;

	KBASE_DEBUG_ASSERT(filp);
	KBASE_DEBUG_ASSERT(wait);

	cli = filp->private_data;
	KBASE_DEBUG_ASSERT(cli);

	poll_wait(filp, &cli->waitq, wait);
	if (kbasep_vinstr_hwcnt_reader_buffer_ready(cli))
		return POLLIN;
	return 0;
}

/**
 * kbasep_vinstr_hwcnt_reader_mmap - hwcnt reader's mmap
 * @filp: pointer to file structure
 * @vma: pointer to vma structure
 *
 * Return: zero on success
 */
static int kbasep_vinstr_hwcnt_reader_mmap(struct file *filp,
		struct vm_area_struct *vma)
{
	struct kbase_vinstr_client *cli;
	unsigned long size, addr, pfn, offset;
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	KBASE_DEBUG_ASSERT(filp);
	KBASE_DEBUG_ASSERT(vma);

	cli = filp->private_data;
	KBASE_DEBUG_ASSERT(cli);

	size = cli->buffer_count * cli->dump_size;

	if (vma->vm_pgoff > (size >> PAGE_SHIFT))
		return -EINVAL;
	if (vm_size > size)
		return -EINVAL;

	offset = vma->vm_pgoff << PAGE_SHIFT;
	if ((vm_size + offset) > size)
		return -EINVAL;

	addr = __pa((unsigned long)cli->dump_buffers + offset);
	pfn = addr >> PAGE_SHIFT;

	return remap_pfn_range(
			vma,
			vma->vm_start,
			pfn,
			vm_size,
			vma->vm_page_prot);
}
/**
 * kbasep_vinstr_hwcnt_reader_release - hwcnt reader's release
 * @inode: pointer to inode structure
 * @filp: pointer to file structure
 *
 * Return: always returns zero
 */
static int kbasep_vinstr_hwcnt_reader_release(struct inode *inode,
		struct file *filp)
{
	struct kbase_vinstr_client *cli;

	KBASE_DEBUG_ASSERT(inode);
	KBASE_DEBUG_ASSERT(filp);

	cli = filp->private_data;
	KBASE_DEBUG_ASSERT(cli);

	kbase_vinstr_detach_client(cli);
	return 0;
}
/*****************************************************************************/

struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev)
{
	struct kbase_vinstr_context *vinstr_ctx;

	vinstr_ctx = kzalloc(sizeof(*vinstr_ctx), GFP_KERNEL);
	if (!vinstr_ctx)
		return NULL;

	INIT_LIST_HEAD(&vinstr_ctx->idle_clients);
	INIT_LIST_HEAD(&vinstr_ctx->waiting_clients);
	mutex_init(&vinstr_ctx->lock);
	vinstr_ctx->kbdev = kbdev;
	vinstr_ctx->thread = NULL;

	atomic_set(&vinstr_ctx->request_pending, 0);
	init_waitqueue_head(&vinstr_ctx->waitq);

	return vinstr_ctx;
}

void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx)
{
	struct kbase_vinstr_client *cli;

	/* Stop service thread first. */
	if (vinstr_ctx->thread)
		kthread_stop(vinstr_ctx->thread);

	while (1) {
		struct list_head *list = &vinstr_ctx->idle_clients;

		if (list_empty(list)) {
			list = &vinstr_ctx->waiting_clients;
			if (list_empty(list))
				break;
		}

		cli = list_first_entry(list, struct kbase_vinstr_client, list);
		list_del(&cli->list);
		kfree(cli->accum_buffer);
		kfree(cli);
		vinstr_ctx->nclients--;
	}
	KBASE_DEBUG_ASSERT(!vinstr_ctx->nclients);
	if (vinstr_ctx->kctx)
		kbasep_vinstr_destroy_kctx(vinstr_ctx);
	kfree(vinstr_ctx);
}
int kbase_vinstr_hwcnt_reader_setup(struct kbase_vinstr_context *vinstr_ctx,
		struct kbase_uk_hwcnt_reader_setup *setup)
{
	struct kbase_vinstr_client *cli;
	u32 bitmap[4];

	KBASE_DEBUG_ASSERT(vinstr_ctx);
	KBASE_DEBUG_ASSERT(setup);
	KBASE_DEBUG_ASSERT(setup->buffer_count);

	bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
	bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
	bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
	bitmap[JM_HWCNT_BM] = setup->jm_bm;

	cli = kbasep_vinstr_attach_client(
			vinstr_ctx,
			setup->buffer_count,
			bitmap,
			&setup->fd,
			NULL);

	if (!cli)
		return -ENOMEM;

	return 0;
}

int kbase_vinstr_legacy_hwc_setup(
		struct kbase_vinstr_context *vinstr_ctx,
		struct kbase_vinstr_client **cli,
		struct kbase_uk_hwcnt_setup *setup)
{
	KBASE_DEBUG_ASSERT(vinstr_ctx);
	KBASE_DEBUG_ASSERT(setup);
	KBASE_DEBUG_ASSERT(cli);

	if (setup->dump_buffer) {
		u32 bitmap[4];

		bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
		bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
		bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
		bitmap[JM_HWCNT_BM] = setup->jm_bm;

		if (*cli)
			return -EBUSY;

		*cli = kbasep_vinstr_attach_client(
				vinstr_ctx,
				0,
				bitmap,
				(void *)(long)setup->dump_buffer,
				NULL);

		if (!(*cli))
			return -ENOMEM;
	} else {
		if (!*cli)
			return -EINVAL;

		kbase_vinstr_detach_client(*cli);
		*cli = NULL;
	}

	return 0;
}
struct kbase_vinstr_client *kbase_vinstr_hwcnt_kernel_setup(
		struct kbase_vinstr_context *vinstr_ctx,
		struct kbase_uk_hwcnt_reader_setup *setup,
		void *kernel_buffer)
{
	u32 bitmap[4];

	if (!vinstr_ctx || !setup || !kernel_buffer)
		return NULL;

	bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
	bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
	bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
	bitmap[JM_HWCNT_BM] = setup->jm_bm;

	return kbasep_vinstr_attach_client(
			vinstr_ctx,
			0,
			bitmap,
			NULL,
			kernel_buffer);
}
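/*
 * Sketch of the in-kernel client flow implied by the API above (illustrative
 * only; buffer sizing via kbase_vinstr_dump_size() and error handling
 * omitted): a kernel client passes its own buffer to
 * kbase_vinstr_hwcnt_kernel_setup(), triggers dumps with
 * kbase_vinstr_hwc_dump(cli, BASE_HWCNT_READER_EVENT_MANUAL), and finally
 * calls kbase_vinstr_detach_client() when it is done.
 */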
int kbase_vinstr_hwc_dump(struct kbase_vinstr_client *cli,
		enum base_hwcnt_reader_event event_id)
{
	int rcode = 0;
	struct kbase_vinstr_context *vinstr_ctx;
	u64 timestamp;
	u32 event_mask;

	if (!cli)
		return -EINVAL;

	vinstr_ctx = cli->vinstr_ctx;
	KBASE_DEBUG_ASSERT(vinstr_ctx);

	KBASE_DEBUG_ASSERT(event_id < BASE_HWCNT_READER_EVENT_COUNT);
	event_mask = 1 << event_id;

	mutex_lock(&vinstr_ctx->lock);

	if (vinstr_ctx->suspended) {
		rcode = -EBUSY;
		goto exit;
	}

	if (event_mask & cli->event_mask) {
		rcode = kbasep_vinstr_collect_and_accumulate(
				vinstr_ctx,
				&timestamp);
		if (rcode)
			goto exit;

		rcode = kbasep_vinstr_update_client(cli, timestamp, event_id);
		if (rcode)
			goto exit;

		kbasep_vinstr_reprogram(vinstr_ctx);
	}

exit:
	mutex_unlock(&vinstr_ctx->lock);

	return rcode;
}

int kbase_vinstr_hwc_clear(struct kbase_vinstr_client *cli)
{
	struct kbase_vinstr_context *vinstr_ctx;
	int rcode;
	u64 unused;

	if (!cli)
		return -EINVAL;

	vinstr_ctx = cli->vinstr_ctx;
	KBASE_DEBUG_ASSERT(vinstr_ctx);

	mutex_lock(&vinstr_ctx->lock);

	if (vinstr_ctx->suspended) {
		rcode = -EBUSY;
		goto exit;
	}

	rcode = kbasep_vinstr_collect_and_accumulate(vinstr_ctx, &unused);
	if (rcode)
		goto exit;
	rcode = kbase_instr_hwcnt_clear(vinstr_ctx->kctx);
	if (rcode)
		goto exit;

	memset(cli->accum_buffer, 0, cli->dump_size);

	kbasep_vinstr_reprogram(vinstr_ctx);

exit:
	mutex_unlock(&vinstr_ctx->lock);

	return rcode;
}
void kbase_vinstr_hwc_suspend(struct kbase_vinstr_context *vinstr_ctx)
{
	u64 unused;

	KBASE_DEBUG_ASSERT(vinstr_ctx);

	mutex_lock(&vinstr_ctx->lock);
	if (!vinstr_ctx->nclients || vinstr_ctx->suspended) {
		mutex_unlock(&vinstr_ctx->lock);
		return;
	}

	kbasep_vinstr_collect_and_accumulate(vinstr_ctx, &unused);
	vinstr_ctx->suspended = true;
	vinstr_ctx->suspended_clients = vinstr_ctx->waiting_clients;
	INIT_LIST_HEAD(&vinstr_ctx->waiting_clients);
	mutex_unlock(&vinstr_ctx->lock);
}

void kbase_vinstr_hwc_resume(struct kbase_vinstr_context *vinstr_ctx)
{
	KBASE_DEBUG_ASSERT(vinstr_ctx);

	mutex_lock(&vinstr_ctx->lock);
	if (!vinstr_ctx->nclients || !vinstr_ctx->suspended) {
		mutex_unlock(&vinstr_ctx->lock);
		return;
	}

	vinstr_ctx->suspended = false;
	vinstr_ctx->waiting_clients = vinstr_ctx->suspended_clients;
	vinstr_ctx->reprogram = true;
	kbasep_vinstr_reprogram(vinstr_ctx);
	atomic_set(&vinstr_ctx->request_pending, 1);
	wake_up_all(&vinstr_ctx->waitq);
	mutex_unlock(&vinstr_ctx->lock);
}