mtk_trace.c

/*
 * Copyright (C) 2015 MediaTek Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kallsyms.h>	/* for kallsyms_lookup_name(), if not pulled in indirectly */
#include <linux/ring_buffer.h>
#include <linux/trace_events.h>
#include "mtk_ftrace.h"
#include "trace.h"
#ifdef CONFIG_MTK_PERF_TRACKER
#include <mt-plat/perf_tracker.h>
#endif
#ifdef CONFIG_MTK_KERNEL_MARKER
static unsigned long __read_mostly mark_addr;
static bool kernel_marker_on = true;

static inline void update_tracing_mark_write_addr(void)
{
	if (unlikely(mark_addr == 0))
		mark_addr = kallsyms_lookup_name("tracing_mark_write");
}
inline void trace_begin(char *name)
{
	if (unlikely(kernel_marker_on) && name) {
		preempt_disable();
		event_trace_printk(mark_addr, "B|%d|%s\n",
				   current->tgid, name);
		preempt_enable();
	}
}
EXPORT_SYMBOL(trace_begin);

inline void trace_counter(char *name, int count)
{
	if (unlikely(kernel_marker_on) && name) {
		preempt_disable();
		event_trace_printk(mark_addr, "C|%d|%s|%d\n",
				   current->tgid, name, count);
		preempt_enable();
	}
}
EXPORT_SYMBOL(trace_counter);

inline void trace_end(void)
{
	if (unlikely(kernel_marker_on)) {
		preempt_disable();
		event_trace_printk(mark_addr, "E\n");
		preempt_enable();
	}
}
EXPORT_SYMBOL(trace_end);
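
/*
 * Illustrative usage (not part of the original file): the helpers above
 * emit Android systrace-style markers ("B|tgid|name", "C|tgid|name|value",
 * "E") into the ftrace buffer. A hypothetical caller could bracket a
 * critical section like this:
 *
 *	trace_begin("my_isr_work");              emits B|<tgid>|my_isr_work
 *	do_work();
 *	trace_counter("my_queue_depth", depth);  emits C|<tgid>|my_queue_depth|<depth>
 *	trace_end();                             emits E
 *
 * my_isr_work, do_work() and depth are made-up names for the example.
 */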
static ssize_t
kernel_marker_on_simple_read(struct file *filp, char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = sprintf(buf, "%d\n", kernel_marker_on);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
kernel_marker_on_simple_write(struct file *filp, const char __user *ubuf,
			      size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	kernel_marker_on = !!val;
	if (kernel_marker_on && !mark_addr)
		update_tracing_mark_write_addr();
	(*ppos)++;

	return cnt;
}

static const struct file_operations kernel_marker_on_simple_fops = {
	.open = tracing_open_generic,
	.read = kernel_marker_on_simple_read,
	.write = kernel_marker_on_simple_write,
	.llseek = default_llseek,
};
static __init int init_kernel_marker(void)
{
	struct trace_array *tr;
	struct dentry *d_tracer;

	tr = top_trace_array();
	d_tracer = tracing_init_dentry();
	if (!tr || IS_ERR(d_tracer))
		return 0;

	trace_create_file("kernel_marker_on", 0644, d_tracer, tr,
			  &kernel_marker_on_simple_fops);
	update_tracing_mark_write_addr();
	return 0;
}
fs_initcall(init_kernel_marker);
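
/*
 * Illustrative usage (assumption, not from the original file): the file
 * created above lives in the tracing directory, which is typically mounted
 * at /sys/kernel/debug/tracing, so the marker path can usually be toggled
 * from user space:
 *
 *	echo 0 > /sys/kernel/debug/tracing/kernel_marker_on   (disable)
 *	echo 1 > /sys/kernel/debug/tracing/kernel_marker_on   (re-enable)
 *
 * The exact mount point depends on how tracefs/debugfs is mounted.
 */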
#endif /* CONFIG_MTK_KERNEL_MARKER */

#if defined(CONFIG_MTK_HIBERNATION) && defined(CONFIG_MTK_SCHED_TRACERS)
int resize_ring_buffer_for_hibernation(int enable)
{
	int ret = 0;
	struct trace_array *tr = NULL;

	if (enable) {
		ring_buffer_expanded = 0;
		ret = tracing_update_buffers();
	} else {
		tr = top_trace_array();
		if (!tr)
			return -ENODEV;
		ret = tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
	}

	return ret;
}
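
/*
 * Illustrative call sites (assumption, not from the original file): a
 * hibernation path would shrink the ring buffer to zero before suspend and
 * re-expand it on resume, e.g.:
 *
 *	resize_ring_buffer_for_hibernation(0);   shrink before hibernate
 *	...
 *	resize_ring_buffer_for_hibernation(1);   re-expand after resume
 */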
#endif /* CONFIG_MTK_HIBERNATION && CONFIG_MTK_SCHED_TRACERS */
#ifdef CONFIG_MTK_SCHED_TRACERS
static unsigned long buf_size = 25165824UL;
static bool boot_trace;

static __init int boot_trace_cmdline(char *str)
{
	boot_trace = true;
	update_buf_size(buf_size);
	return 0;
}
__setup("androidboot.boot_trace", boot_trace_cmdline);
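
/*
 * Illustrative usage (assumption, not from the original file): boot-time
 * tracing is armed from the kernel command line, typically appended by the
 * bootloader, e.g.:
 *
 *	console=ttyS0 ... androidboot.boot_trace
 *
 * When the parameter is present, boot_trace_cmdline() runs during early
 * parameter parsing and pre-sizes the trace buffer via update_buf_size()
 * to buf_size (25165824 bytes = 24 MB).
 */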
/* If boot tracing is on, ignore the tracing-off command. */
bool boot_ftrace_check(unsigned long trace_en)
{
	bool boot_complete = false;

	if (!boot_trace || trace_en)
		return false;

#ifdef CONFIG_MTPROF
	boot_complete = boot_finish;
#endif
	if (!boot_complete) {
		pr_info("Capturing boot ftrace, ignoring tracing off.\n");
		return true;
	}

	return false;
}
#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>

void print_enabled_events(struct trace_buffer *buf, struct seq_file *m)
{
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr;
	unsigned long usec_rem;
	unsigned long long t;
	struct rtc_time tm_utc, tm;
	struct timeval tv = { 0 };

	if (!buf->tr)
		return;
	tr = buf->tr;

	if (tr->name != NULL)
		seq_printf(m, "# instance: %s, enabled events:", tr->name);
	else
		seq_puts(m, "# enabled events:");

	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (file->flags & EVENT_FILE_FL_ENABLED)
			seq_printf(m, " %s:%s", call->class->system,
				   trace_event_name(call));
	}
	seq_puts(m, "\n");

	t = sched_clock();
	do_gettimeofday(&tv);
	t = ns2usecs(t);
	usec_rem = do_div(t, USEC_PER_SEC);
	rtc_time_to_tm(tv.tv_sec, &tm_utc);
	rtc_time_to_tm(tv.tv_sec - sys_tz.tz_minuteswest * 60, &tm);

	seq_printf(m, "# kernel time now: %5llu.%06lu\n",
		   t, usec_rem);
	seq_printf(m, "# UTC time:\t%d-%02d-%02d %02d:%02d:%02d.%03u\n",
		   tm_utc.tm_year + 1900, tm_utc.tm_mon + 1,
		   tm_utc.tm_mday, tm_utc.tm_hour,
		   tm_utc.tm_min, tm_utc.tm_sec,
		   (unsigned int)tv.tv_usec);
	seq_printf(m, "# android time:\t%d-%02d-%02d %02d:%02d:%02d.%03u\n",
		   tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		   tm.tm_hour, tm.tm_min, tm.tm_sec,
		   (unsigned int)tv.tv_usec);
}
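
/*
 * Example of the header print_enabled_events() emits, following the format
 * strings above (all values are made up for illustration):
 *
 *	# instance: foo, enabled events: sched:sched_switch sched:sched_wakeup
 *	# kernel time now:  1234.567890
 *	# UTC time:	2015-01-02 03:04:05.678
 *	# android time:	2015-01-02 11:04:05.678
 */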
/* ftrace's switch function for MTK solution */
static void ftrace_events_enable(int enable)
{
	if (enable) {
		trace_set_clr_event(NULL, "sched_switch", 1);
		trace_set_clr_event(NULL, "sched_wakeup", 1);
		trace_set_clr_event(NULL, "sched_wakeup_new", 1);
#ifdef CONFIG_SMP
		trace_set_clr_event(NULL, "sched_migrate_task", 1);
#endif
		trace_set_clr_event(NULL, "workqueue_execute_start", 1);
		trace_set_clr_event(NULL, "workqueue_execute_end", 1);
		trace_set_clr_event(NULL, "cpu_frequency", 1);
		trace_set_clr_event(NULL, "block_bio_frontmerge", 1);
		trace_set_clr_event(NULL, "block_bio_backmerge", 1);
		trace_set_clr_event(NULL, "block_rq_issue", 1);
		trace_set_clr_event(NULL, "block_rq_insert", 1);
		trace_set_clr_event(NULL, "block_rq_complete", 1);
		trace_set_clr_event(NULL, "block_rq_requeue", 1);
		trace_set_clr_event(NULL, "debug_allocate_large_pages", 1);
		trace_set_clr_event(NULL, "dump_allocate_large_pages", 1);
		trace_set_clr_event("mtk_events", NULL, 1);

		if (boot_trace) {
			trace_set_clr_event("android_fs", NULL, 1);
			trace_set_clr_event(NULL, "sched_blocked_reason", 1);
			/* trace_set_clr_event(NULL, "sched_waking", 1); */
		} else {
			trace_set_clr_event("ipi", NULL, 1);
			trace_set_clr_event(NULL, "softirq_entry", 1);
			trace_set_clr_event(NULL, "softirq_exit", 1);
			trace_set_clr_event(NULL, "softirq_raise", 1);
			trace_set_clr_event(NULL, "irq_handler_entry", 1);
			trace_set_clr_event(NULL, "irq_handler_exit", 1);
#ifdef CONFIG_MTK_SCHED_MONITOR
			trace_set_clr_event(NULL, "sched_mon_msg", 1);
#endif
#ifdef CONFIG_LOCKDEP
			trace_set_clr_event(NULL, "lock_dbg", 1);
			trace_set_clr_event(NULL, "lock_monitor", 1);
#endif
			trace_set_clr_event("met_bio", NULL, 1);
			trace_set_clr_event("met_fuse", NULL, 1);
		}

		tracing_on();
	} else {
		tracing_off();
		trace_set_clr_event(NULL, NULL, 0);
	}
}
static __init int boot_ftrace(void)
{
	struct trace_array *tr;
	int ret;

	if (boot_trace) {
		tr = top_trace_array();
		if (!tr) {
			pr_info("[ftrace] Error: tracer list is empty.\n");
			return 0;
		}
#ifdef CONFIG_MTK_PERF_TRACKER
		perf_tracker_enable(1);
#endif
		ret = tracing_update_buffers();
		if (ret != 0)
			pr_debug("unable to expand buffer, ret=%d\n", ret);
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
#endif
		ftrace_events_enable(1);
		set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 0);
		pr_debug("[ftrace] boot-time profiling...\n");
	}
	return 0;
}
core_initcall(boot_ftrace);
#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
static __init int enable_ftrace(void)
{
	int ret;

	if (!boot_trace) {
		/* enable ftrace facilities */
		ftrace_events_enable(1);

		/*
		 * The ring buffer is only expanded earlier (in boot_ftrace)
		 * when boot-time ftrace is collected; otherwise expansion is
		 * deferred to this late_initcall so that an early-expanded
		 * ring buffer does not impact boot time.
		 */
		ret = tracing_update_buffers();
		if (ret != 0)
			pr_debug("fail to update buffer, ret=%d\n", ret);
		else
			pr_debug("[ftrace] ftrace ready...\n");
	}
	return 0;
}
late_initcall(enable_ftrace);
#endif /* CONFIG_MTK_FTRACE_DEFAULT_ENABLE */
#endif /* CONFIG_MTK_SCHED_TRACERS */