/* mali_kbase_device.c */
/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * Base kernel device APIs
 */
  18. #include <linux/debugfs.h>
  19. #include <linux/dma-mapping.h>
  20. #include <linux/seq_file.h>
  21. #include <linux/kernel.h>
  22. #include <linux/module.h>
  23. #include <linux/of_platform.h>
  24. #include <mali_kbase.h>
  25. #include <mali_kbase_defs.h>
  26. #include <mali_kbase_hw.h>
  27. #include <mali_kbase_config_defaults.h>
  28. #include <mali_kbase_profiling_gator_api.h>
/* NOTE: Magic - 0x45435254 (TRCE in ASCII).
 * Supports tracing feature provided in the base module.
 * Please keep it in sync with the value of base module.
 */
#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254
#if KBASE_TRACE_ENABLE
/* Table of trace-code names, indexed by the kbase_trace_code value;
 * used by kbasep_trace_format_msg() when rendering messages. */
static const char *kbasep_trace_code_string[] = {
	/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
	 * THIS MUST BE USED AT THE START OF THE ARRAY */
#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
#include "mali_kbase_trace_defs.h"
#undef KBASE_TRACE_CODE_MAKE_CODE
};
#endif
/* Size of the buffer a formatted trace message is rendered into */
#define DEBUG_MESSAGE_SIZE 256

static int kbasep_trace_init(struct kbase_device *kbdev);
static void kbasep_trace_term(struct kbase_device *kbdev);
static void kbasep_trace_hook_wrapper(void *param);
  47. struct kbase_device *kbase_device_alloc(void)
  48. {
  49. return kzalloc(sizeof(struct kbase_device), GFP_KERNEL);
  50. }
/*
 * Initialise the state of GPU address space @i: number, fault address,
 * the page-fault workqueue, and — on GPUs affected by HW issue 8316 —
 * the extra "poke" workqueue/timer used as a workaround.
 *
 * Returns 0 on success or -EINVAL if a workqueue cannot be created;
 * on failure nothing is left allocated.
 */
static int kbase_device_as_init(struct kbase_device *kbdev, int i)
{
	const char format[] = "mali_mmu%d";
	char name[sizeof(format)];
	const char poke_format[] = "mali_mmu%d_poker";
	char poke_name[sizeof(poke_format)];

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
		snprintf(poke_name, sizeof(poke_name), poke_format, i);

	snprintf(name, sizeof(name), format, i);

	kbdev->as[i].number = i;
	kbdev->as[i].fault_addr = 0ULL;

	/* Per-AS workqueue with max_active = 1 so fault work for this
	 * address space is not run concurrently with itself. */
	kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
	if (!kbdev->as[i].pf_wq)
		return -EINVAL;

	mutex_init(&kbdev->as[i].transaction_mutex);
	INIT_WORK(&kbdev->as[i].work_pagefault, page_fault_worker);
	INIT_WORK(&kbdev->as[i].work_busfault, bus_fault_worker);

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
		struct hrtimer *poke_timer = &kbdev->as[i].poke_timer;
		struct work_struct *poke_work = &kbdev->as[i].poke_work;

		kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
		if (!kbdev->as[i].poke_wq) {
			/* Roll back the page-fault workqueue created above */
			destroy_workqueue(kbdev->as[i].pf_wq);
			return -EINVAL;
		}
		KBASE_DEBUG_ASSERT(!object_is_on_stack(poke_work));
		INIT_WORK(poke_work, kbasep_as_do_poke);

		hrtimer_init(poke_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		poke_timer->function = kbasep_as_poke_timer_callback;

		kbdev->as[i].poke_refcount = 0;
		kbdev->as[i].poke_state = 0u;
	}

	return 0;
}
  85. static void kbase_device_as_term(struct kbase_device *kbdev, int i)
  86. {
  87. destroy_workqueue(kbdev->as[i].pf_wq);
  88. if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
  89. destroy_workqueue(kbdev->as[i].poke_wq);
  90. }
  91. static int kbase_device_all_as_init(struct kbase_device *kbdev)
  92. {
  93. int i, err;
  94. for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
  95. err = kbase_device_as_init(kbdev, i);
  96. if (err)
  97. goto free_workqs;
  98. }
  99. return 0;
  100. free_workqs:
  101. for (; i > 0; i--)
  102. kbase_device_as_term(kbdev, i);
  103. return err;
  104. }
  105. static void kbase_device_all_as_term(struct kbase_device *kbdev)
  106. {
  107. int i;
  108. for (i = 0; i < kbdev->nr_hw_address_spaces; i++)
  109. kbase_device_as_term(kbdev, i);
  110. }
/*
 * One-time initialisation of a freshly allocated kbase_device.
 *
 * Sets up the HW issue/feature masks, DMA masks, per-address-space
 * state, the trace ring buffer, profiling controls, instrumentation
 * backend and MMU mode.  On failure, everything initialised so far
 * is unwound via the goto chain and a negative error is returned.
 */
int kbase_device_init(struct kbase_device * const kbdev)
{
	int i, err;
#ifdef CONFIG_ARM64
	struct device_node *np = NULL;
#endif /* CONFIG_ARM64 */

	spin_lock_init(&kbdev->mmu_mask_change);
#ifdef CONFIG_ARM64
	kbdev->cci_snoop_enabled = false;
	np = kbdev->dev->of_node;
	if (np != NULL) {
		/* Optional SMC IDs for snoop control; a missing DT
		 * property leaves the corresponding ID as 0. */
		if (of_property_read_u32(np, "snoop_enable_smc",
					&kbdev->snoop_enable_smc))
			kbdev->snoop_enable_smc = 0;
		if (of_property_read_u32(np, "snoop_disable_smc",
					&kbdev->snoop_disable_smc))
			kbdev->snoop_disable_smc = 0;
		/* Either both or none of the calls should be provided. */
		if (!((kbdev->snoop_disable_smc == 0
			&& kbdev->snoop_enable_smc == 0)
			|| (kbdev->snoop_disable_smc != 0
			&& kbdev->snoop_enable_smc != 0))) {
			WARN_ON(1);
			err = -EINVAL;
			goto fail;
		}
	}
#endif /* CONFIG_ARM64 */

	/* Get the list of workarounds for issues on the current HW
	 * (identified by the GPU_ID register)
	 */
	err = kbase_hw_set_issues_mask(kbdev);
	if (err)
		goto fail;

	/* Set the list of features available on the current HW
	 * (identified by the GPU_ID register)
	 */
	kbase_hw_set_features_mask(kbdev);

	kbase_gpuprops_set_features(kbdev);

	/* On Linux 4.0+, dma coherency is determined from device tree */
#if defined(CONFIG_ARM64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
	set_dma_ops(kbdev->dev, &noncoherent_swiotlb_dma_ops);
#endif

	/* Workaround a pre-3.13 Linux issue, where dma_mask is NULL when our
	 * device structure was created by device-tree
	 */
	if (!kbdev->dev->dma_mask)
		kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask;

	/* DMA masks sized to the physical address width of the GPU MMU */
	err = dma_set_mask(kbdev->dev,
			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
	if (err)
		goto dma_set_mask_failed;

	err = dma_set_coherent_mask(kbdev->dev,
			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
	if (err)
		goto dma_set_mask_failed;

	kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;

	err = kbase_device_all_as_init(kbdev);
	if (err)
		goto as_init_failed;

	spin_lock_init(&kbdev->hwcnt.lock);

	err = kbasep_trace_init(kbdev);
	if (err)
		goto term_as;

	mutex_init(&kbdev->cacheclean_lock);

#ifdef CONFIG_MALI_TRACE_TIMELINE
	for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
		kbdev->timeline.slot_atoms_submitted[i] = 0;

	for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i)
		atomic_set(&kbdev->timeline.pm_event_uid[i], 0);
#endif /* CONFIG_MALI_TRACE_TIMELINE */

	/* fbdump profiling controls set to 0 - fbdump not enabled until changed by gator */
	for (i = 0; i < FBDUMP_CONTROL_MAX; i++)
		kbdev->kbase_profiling_controls[i] = 0;

	/* Dump the trace buffer whenever a debug assert fires */
	kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);

	atomic_set(&kbdev->ctx_num, 0);

	err = kbase_instr_backend_init(kbdev);
	if (err)
		goto term_trace;

	kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD;

	kbdev->reset_timeout_ms = DEFAULT_RESET_TIMEOUT_MS;

#ifdef CONFIG_MALI_GPU_MMU_AARCH64
	kbdev->mmu_mode = kbase_mmu_mode_get_aarch64();
#else
	kbdev->mmu_mode = kbase_mmu_mode_get_lpae();
#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */

#ifdef CONFIG_MALI_DEBUG
	init_waitqueue_head(&kbdev->driver_inactive_wait);
#endif /* CONFIG_MALI_DEBUG */

	return 0;
term_trace:
	kbasep_trace_term(kbdev);
term_as:
	kbase_device_all_as_term(kbdev);
as_init_failed:
dma_set_mask_failed:
fail:
	return err;
}
/*
 * Undo kbase_device_init(): unregister the debug-assert trace hook,
 * then tear down the instrumentation backend, trace buffer and
 * address-space state in reverse order of initialisation.
 */
void kbase_device_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

#if KBASE_TRACE_ENABLE
	kbase_debug_assert_register_hook(NULL, NULL);
#endif

	kbase_instr_backend_term(kbdev);
	kbasep_trace_term(kbdev);
	kbase_device_all_as_term(kbdev);
}
  220. void kbase_device_free(struct kbase_device *kbdev)
  221. {
  222. kfree(kbdev);
  223. }
  224. int kbase_device_trace_buffer_install(
  225. struct kbase_context *kctx, u32 *tb, size_t size)
  226. {
  227. unsigned long flags;
  228. KBASE_DEBUG_ASSERT(kctx);
  229. KBASE_DEBUG_ASSERT(tb);
  230. /* Interface uses 16-bit value to track last accessed entry. Each entry
  231. * is composed of two 32-bit words.
  232. * This limits the size that can be handled without an overflow. */
  233. if (0xFFFF * (2 * sizeof(u32)) < size)
  234. return -EINVAL;
  235. /* set up the header */
  236. /* magic number in the first 4 bytes */
  237. tb[0] = TRACE_BUFFER_HEADER_SPECIAL;
  238. /* Store (write offset = 0, wrap counter = 0, transaction active = no)
  239. * write offset 0 means never written.
  240. * Offsets 1 to (wrap_offset - 1) used to store values when trace started
  241. */
  242. tb[1] = 0;
  243. /* install trace buffer */
  244. spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
  245. kctx->jctx.tb_wrap_offset = size / 8;
  246. kctx->jctx.tb = tb;
  247. spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
  248. return 0;
  249. }
  250. void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx)
  251. {
  252. unsigned long flags;
  253. KBASE_DEBUG_ASSERT(kctx);
  254. spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
  255. kctx->jctx.tb = NULL;
  256. kctx->jctx.tb_wrap_offset = 0;
  257. spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
  258. }
/*
 * Record one GPU register access (read or write) into the context's
 * trace buffer, if one is installed.
 *
 * Header word tb[1] packs: bit 0 = transaction-in-progress flag,
 * bits 1..15 = wrap counter, bits 16..31 = last write offset.  The
 * flag is raised (with a barrier) before the entry is stored and
 * cleared when the new header is published, so a reader can detect
 * a torn update.
 */
void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
{
	unsigned long flags;

	spin_lock_irqsave(&kctx->jctx.tb_lock, flags);

	if (kctx->jctx.tb) {
		u16 wrap_count;
		u16 write_offset;
		u32 *tb = kctx->jctx.tb;
		u32 header_word;

		header_word = tb[1];
		/* No transaction may already be in progress */
		KBASE_DEBUG_ASSERT(0 == (header_word & 0x1));

		wrap_count = (header_word >> 1) & 0x7FFF;
		write_offset = (header_word >> 16) & 0xFFFF;

		/* mark as transaction in progress */
		tb[1] |= 0x1;
		mb();

		/* calculate new offset; offset 0 is reserved ("never
		 * written"), so wrapping restarts at 1 */
		write_offset++;
		if (write_offset == kctx->jctx.tb_wrap_offset) {
			/* wrap */
			write_offset = 1;
			wrap_count++;
			wrap_count &= 0x7FFF;	/* 15bit wrap counter */
		}

		/* store the trace entry at the selected offset; low bit
		 * of the first word distinguishes write (1) from read (0) */
		tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0);
		tb[write_offset * 2 + 1] = reg_value;
		mb();

		/* new header word */
		header_word = (write_offset << 16) | (wrap_count << 1) | 0x0;	/* transaction complete */
		tb[1] = header_word;
	}

	spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
}
  293. /*
  294. * Device trace functions
  295. */
  296. #if KBASE_TRACE_ENABLE
  297. static int kbasep_trace_init(struct kbase_device *kbdev)
  298. {
  299. struct kbase_trace *rbuf;
  300. rbuf = kmalloc_array(KBASE_TRACE_SIZE, sizeof(*rbuf), GFP_KERNEL);
  301. if (!rbuf)
  302. return -EINVAL;
  303. kbdev->trace_rbuf = rbuf;
  304. spin_lock_init(&kbdev->trace_lock);
  305. return 0;
  306. }
  307. static void kbasep_trace_term(struct kbase_device *kbdev)
  308. {
  309. kfree(kbdev->trace_rbuf);
  310. }
/*
 * Render @trace_msg as one comma-separated line into @buffer of
 * length @len.  Every snprintf return is clamped to >= 0 and the
 * remaining space to >= 0, so truncation never corrupts the buffer
 * or the running count.
 */
static void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len)
{
	s32 written = 0;

	/* Initial part of message: timestamp, thread, cpu, code name, ctx */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0);

	/* Atom details only when an atom was attached to the message */
	if (trace_msg->katom)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0);

	/* NOTE: Could add function callbacks to handle different message types */
	/* Jobslot present */
	if (trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Refcount present */
	if (trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT)
		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0);

	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);

	/* Rest of message */
	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0);
}
  331. static void kbasep_trace_dump_msg(struct kbase_device *kbdev, struct kbase_trace *trace_msg)
  332. {
  333. char buffer[DEBUG_MESSAGE_SIZE];
  334. kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
  335. dev_dbg(kbdev->dev, "%s", buffer);
  336. }
/*
 * Append one message to the device trace ring buffer.
 *
 * Runs under trace_lock with interrupts disabled.  When the buffer
 * is full, the oldest message is discarded by advancing
 * trace_first_out past it.
 */
void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
{
	unsigned long irqflags;
	struct kbase_trace *trace_msg;

	spin_lock_irqsave(&kbdev->trace_lock, irqflags);

	trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];

	/* Fill the message */
	trace_msg->thread_id = task_pid_nr(current);
	trace_msg->cpu = task_cpu(current);

	getnstimeofday(&trace_msg->timestamp);

	trace_msg->code = code;
	trace_msg->ctx = ctx;

	if (NULL == katom) {
		trace_msg->katom = false;
	} else {
		trace_msg->katom = true;
		trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
		trace_msg->atom_udata[0] = katom->udata.blob[0];
		trace_msg->atom_udata[1] = katom->udata.blob[1];
	}

	trace_msg->gpu_addr = gpu_addr;
	trace_msg->jobslot = jobslot;
	/* refcount is clamped to 0xFF before being stored */
	trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
	trace_msg->info_val = info_val;
	trace_msg->flags = flags;

	/* Update the ringbuffer indices */
	kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
	if (kbdev->trace_next_in == kbdev->trace_first_out)
		kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;

	/* Done */
	spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
}
  369. void kbasep_trace_clear(struct kbase_device *kbdev)
  370. {
  371. unsigned long flags;
  372. spin_lock_irqsave(&kbdev->trace_lock, flags);
  373. kbdev->trace_first_out = kbdev->trace_next_in;
  374. spin_unlock_irqrestore(&kbdev->trace_lock, flags);
  375. }
  376. void kbasep_trace_dump(struct kbase_device *kbdev)
  377. {
  378. unsigned long flags;
  379. u32 start;
  380. u32 end;
  381. dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
  382. spin_lock_irqsave(&kbdev->trace_lock, flags);
  383. start = kbdev->trace_first_out;
  384. end = kbdev->trace_next_in;
  385. while (start != end) {
  386. struct kbase_trace *trace_msg = &kbdev->trace_rbuf[start];
  387. kbasep_trace_dump_msg(kbdev, trace_msg);
  388. start = (start + 1) & KBASE_TRACE_MASK;
  389. }
  390. dev_dbg(kbdev->dev, "TRACE_END");
  391. spin_unlock_irqrestore(&kbdev->trace_lock, flags);
  392. KBASE_TRACE_CLEAR(kbdev);
  393. }
  394. static void kbasep_trace_hook_wrapper(void *param)
  395. {
  396. struct kbase_device *kbdev = (struct kbase_device *)param;
  397. kbasep_trace_dump(kbdev);
  398. }
  399. #ifdef CONFIG_DEBUG_FS
/* Per-open state for the debugfs trace file: a snapshot of the whole
 * ring buffer plus the [start, end) indices captured at open time. */
struct trace_seq_state {
	struct kbase_trace trace_buf[KBASE_TRACE_SIZE];
	u32 start;
	u32 end;
};
/*
 * seq_file start callback: map file position *pos onto the snapshot
 * entry at ring index (start + *pos), or return NULL to end the
 * iteration.
 */
static void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
{
	struct trace_seq_state *state = s->private;
	int i;

	if (*pos > KBASE_TRACE_SIZE)
		return NULL;

	i = state->start + *pos;
	/* First test covers the non-wrapped layout (end >= start);
	 * second covers the wrapped layout, where valid unwrapped
	 * indices run up to end + KBASE_TRACE_SIZE. */
	if ((state->end >= state->start && i >= state->end) ||
			i >= state->end + KBASE_TRACE_SIZE)
		return NULL;

	i &= KBASE_TRACE_MASK;

	return &state->trace_buf[i];
}
/* Nothing to release per iteration; required by seq_operations. */
static void kbasep_trace_seq_stop(struct seq_file *s, void *data)
{
}
  421. static void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
  422. {
  423. struct trace_seq_state *state = s->private;
  424. int i;
  425. (*pos)++;
  426. i = (state->start + *pos) & KBASE_TRACE_MASK;
  427. if (i == state->end)
  428. return NULL;
  429. return &state->trace_buf[i];
  430. }
  431. static int kbasep_trace_seq_show(struct seq_file *s, void *data)
  432. {
  433. struct kbase_trace *trace_msg = data;
  434. char buffer[DEBUG_MESSAGE_SIZE];
  435. kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
  436. seq_printf(s, "%s\n", buffer);
  437. return 0;
  438. }
/* seq_file iterator over the snapshotted trace buffer */
static const struct seq_operations kbasep_trace_seq_ops = {
	.start = kbasep_trace_seq_start,
	.next = kbasep_trace_seq_next,
	.stop = kbasep_trace_seq_stop,
	.show = kbasep_trace_seq_show,
};
/*
 * Open callback for the "mali_trace" debugfs file: snapshot the whole
 * trace ring buffer and its indices under trace_lock into the
 * per-open seq_file state, so dumping cannot race with new traces.
 * Returns -ENOMEM if the state cannot be allocated.
 */
static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
{
	struct kbase_device *kbdev = inode->i_private;
	unsigned long flags;

	struct trace_seq_state *state;

	state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));
	if (!state)
		return -ENOMEM;

	spin_lock_irqsave(&kbdev->trace_lock, flags);
	state->start = kbdev->trace_first_out;
	state->end = kbdev->trace_next_in;
	memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
	spin_unlock_irqrestore(&kbdev->trace_lock, flags);

	return 0;
}
/* File operations for the "mali_trace" debugfs file; reads are served
 * by the seq_file iterator above. */
static const struct file_operations kbasep_trace_debugfs_fops = {
	.open = kbasep_trace_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
/* Create the read-only "mali_trace" file in the device's debugfs
 * directory. */
void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
{
	debugfs_create_file("mali_trace", S_IRUGO,
			kbdev->mali_debugfs_directory, kbdev,
			&kbasep_trace_debugfs_fops);
}
#else
/* CONFIG_DEBUG_FS disabled: nothing to create. */
void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
{
}
#endif /* CONFIG_DEBUG_FS */
#else /* KBASE_TRACE_ENABLE */
/* Tracing compiled out: provide no-op stubs with identical interfaces
 * so callers need no conditional compilation. */
static int kbasep_trace_init(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
	return 0;
}

static void kbasep_trace_term(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}

static void kbasep_trace_hook_wrapper(void *param)
{
	CSTD_UNUSED(param);
}

void kbasep_trace_dump(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
#endif /* KBASE_TRACE_ENABLE */
  496. void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value)
  497. {
  498. switch (control) {
  499. case FBDUMP_CONTROL_ENABLE:
  500. /* fall through */
  501. case FBDUMP_CONTROL_RATE:
  502. /* fall through */
  503. case SW_COUNTER_ENABLE:
  504. /* fall through */
  505. case FBDUMP_CONTROL_RESIZE_FACTOR:
  506. kbdev->kbase_profiling_controls[control] = value;
  507. break;
  508. default:
  509. dev_err(kbdev->dev, "Profiling control %d not found\n", control);
  510. break;
  511. }
  512. }
  513. u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control)
  514. {
  515. u32 ret_value = 0;
  516. switch (control) {
  517. case FBDUMP_CONTROL_ENABLE:
  518. /* fall through */
  519. case FBDUMP_CONTROL_RATE:
  520. /* fall through */
  521. case SW_COUNTER_ENABLE:
  522. /* fall through */
  523. case FBDUMP_CONTROL_RESIZE_FACTOR:
  524. ret_value = kbdev->kbase_profiling_controls[control];
  525. break;
  526. default:
  527. dev_err(kbdev->dev, "Profiling control %d not found\n", control);
  528. break;
  529. }
  530. return ret_value;
  531. }
  532. /*
  533. * Called by gator to control the production of
  534. * profiling information at runtime
  535. * */
  536. void _mali_profiling_control(u32 action, u32 value)
  537. {
  538. struct kbase_device *kbdev = NULL;
  539. /* find the first i.e. call with -1 */
  540. kbdev = kbase_find_device(-1);
  541. if (NULL != kbdev)
  542. kbase_set_profiling_control(kbdev, action, value);
  543. }
  544. KBASE_EXPORT_SYMBOL(_mali_profiling_control);