trace_irqsoff.c

// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
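
/*
 * preempt_trace() and irq_trace() gate every hook below: an event is only
 * acted on when the matching TRACER_*_OFF bit is set in trace_type (chosen
 * by the selected tracer's init function further down) and the corresponding
 * condition (non-zero preempt count, or irqs disabled) actually holds.
 */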

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *           incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *           is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
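
/*
 * Caller contract: when func_prolog_dec() returns 1 it has taken the
 * per-cpu data->disabled reference, so the caller must drop it again with
 * atomic_dec(&data->disabled) once its event has been written, as
 * irqsoff_tracer_call() below does.
 */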

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
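
/*
 * report_latency() has two modes: when a threshold is set via
 * tracing_thresh, every section at least that long is reported; otherwise
 * only a delta that beats the current maximum (tr->max_latency) is recorded.
 */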

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, pc);

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, pc);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
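
/*
 * Lifecycle of a measurement: start_critical_timing() runs when irqs or
 * preemption are first disabled on a CPU and records preempt_timestamp and
 * critical_start; stop_critical_timing() runs when they are re-enabled and
 * hands the section to check_critical_timing(), which records it as the new
 * maximum via update_max_tr_single() if report_latency() accepts the delta.
 */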
/* start and stop critical timings, used to suspend timing over sections we do not want to measure (e.g. idle) */
void start_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));

	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		start_critical_timing(a0, a1, pc);
}
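
/*
 * Note: tracer_hardirqs_on/off (and tracer_preempt_on/off below) are not
 * called from this file; they are non-static hooks invoked from the
 * kernel's hardirq/preemption state tracking whenever those states are
 * toggled, and they simply translate the enable/disable events into
 * start_critical_timing()/stop_critical_timing() calls.
 */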

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name = "irqsoff",
	.init = irqsoff_tracer_init,
	.reset = irqsoff_tracer_reset,
	.start = irqsoff_tracer_start,
	.stop = irqsoff_tracer_stop,
	.print_max = true,
	.print_header = irqsoff_print_header,
	.print_line = irqsoff_print_line,
	.flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_irqsoff,
#endif
	.open = irqsoff_trace_open,
	.close = irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		start_critical_timing(a0, a1, pc);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name = "preemptoff",
	.init = preemptoff_tracer_init,
	.reset = preemptoff_tracer_reset,
	.start = irqsoff_tracer_start,
	.stop = irqsoff_tracer_stop,
	.print_max = true,
	.print_header = irqsoff_print_header,
	.print_line = irqsoff_print_line,
	.flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_preemptoff,
#endif
	.open = irqsoff_trace_open,
	.close = irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name = "preemptirqsoff",
	.init = preemptirqsoff_tracer_init,
	.reset = preemptirqsoff_tracer_reset,
	.start = irqsoff_tracer_start,
	.stop = irqsoff_tracer_stop,
	.print_max = true,
	.print_header = irqsoff_print_header,
	.print_line = irqsoff_print_line,
	.flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_preemptirqsoff,
#endif
	.open = irqsoff_trace_open,
	.close = irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */