/*
 * rcu.h — RCU tracepoint definitions (TRACE_EVENT declarations for the
 * "rcu" trace system).  See <linux/tracepoint.h> for the event machinery.
 */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #undef TRACE_SYSTEM
  3. #define TRACE_SYSTEM rcu
  4. #if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
  5. #define _TRACE_RCU_H
  6. #include <linux/tracepoint.h>
  7. /*
  8. * Tracepoint for start/end markers used for utilization calculations.
  9. * By convention, the string is of the following forms:
  10. *
  11. * "Start <activity>" -- Mark the start of the specified activity,
  12. * such as "context switch". Nesting is permitted.
  13. * "End <activity>" -- Mark the end of the specified activity.
  14. *
  15. * An "@" character within "<activity>" is a comment character: Data
  16. * reduction scripts will ignore the "@" and the remainder of the line.
  17. */
  18. TRACE_EVENT(rcu_utilization,
  19. TP_PROTO(const char *s),
  20. TP_ARGS(s),
  21. TP_STRUCT__entry(
  22. __field(const char *, s)
  23. ),
  24. TP_fast_assign(
  25. __entry->s = s;
  26. ),
  27. TP_printk("%s", __entry->s)
  28. );
  29. #ifdef CONFIG_RCU_TRACE
  30. #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
  31. /*
  32. * Tracepoint for grace-period events. Takes a string identifying the
  33. * RCU flavor, the grace-period number, and a string identifying the
  34. * grace-period-related event as follows:
  35. *
  36. * "AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL.
  37. * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL.
  38. * "newreq": Request a new grace period.
  39. * "start": Start a grace period.
  40. * "cpustart": CPU first notices a grace-period start.
  41. * "cpuqs": CPU passes through a quiescent state.
  42. * "cpuonl": CPU comes online.
  43. * "cpuofl": CPU goes offline.
  44. * "cpuofl-bgp": CPU goes offline while blocking a grace period.
  45. * "reqwait": GP kthread sleeps waiting for grace-period request.
  46. * "reqwaitsig": GP kthread awakened by signal from reqwait state.
  47. * "fqswait": GP kthread waiting until time to force quiescent states.
  48. * "fqsstart": GP kthread starts forcing quiescent states.
  49. * "fqsend": GP kthread done forcing quiescent states.
  50. * "fqswaitsig": GP kthread awakened by signal from fqswait state.
  51. * "end": End a grace period.
  52. * "cpuend": CPU first notices a grace-period end.
  53. */
  54. TRACE_EVENT(rcu_grace_period,
  55. TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
  56. TP_ARGS(rcuname, gp_seq, gpevent),
  57. TP_STRUCT__entry(
  58. __field(const char *, rcuname)
  59. __field(unsigned long, gp_seq)
  60. __field(const char *, gpevent)
  61. ),
  62. TP_fast_assign(
  63. __entry->rcuname = rcuname;
  64. __entry->gp_seq = gp_seq;
  65. __entry->gpevent = gpevent;
  66. ),
  67. TP_printk("%s %lu %s",
  68. __entry->rcuname, __entry->gp_seq, __entry->gpevent)
  69. );
  70. /*
  71. * Tracepoint for future grace-period events. The caller should pull
  72. * the data from the rcu_node structure, other than rcuname, which comes
  73. * from the rcu_state structure, and event, which is one of the following:
  74. *
  75. * "Startleaf": Request a grace period based on leaf-node data.
  76. * "Prestarted": Someone beat us to the request
  77. * "Startedleaf": Leaf node marked for future GP.
  78. * "Startedleafroot": All nodes from leaf to root marked for future GP.
  79. * "Startedroot": Requested a nocb grace period based on root-node data.
  80. * "NoGPkthread": The RCU grace-period kthread has not yet started.
  81. * "StartWait": Start waiting for the requested grace period.
  82. * "ResumeWait": Resume waiting after signal.
  83. * "EndWait": Complete wait.
  84. * "Cleanup": Clean up rcu_node structure after previous GP.
  85. * "CleanupMore": Clean up, and another GP is needed.
  86. */
  87. TRACE_EVENT(rcu_future_grace_period,
  88. TP_PROTO(const char *rcuname, unsigned long gp_seq,
  89. unsigned long gp_seq_req, u8 level, int grplo, int grphi,
  90. const char *gpevent),
  91. TP_ARGS(rcuname, gp_seq, gp_seq_req, level, grplo, grphi, gpevent),
  92. TP_STRUCT__entry(
  93. __field(const char *, rcuname)
  94. __field(unsigned long, gp_seq)
  95. __field(unsigned long, gp_seq_req)
  96. __field(u8, level)
  97. __field(int, grplo)
  98. __field(int, grphi)
  99. __field(const char *, gpevent)
  100. ),
  101. TP_fast_assign(
  102. __entry->rcuname = rcuname;
  103. __entry->gp_seq = gp_seq;
  104. __entry->gp_seq_req = gp_seq_req;
  105. __entry->level = level;
  106. __entry->grplo = grplo;
  107. __entry->grphi = grphi;
  108. __entry->gpevent = gpevent;
  109. ),
  110. TP_printk("%s %lu %lu %u %d %d %s",
  111. __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
  112. __entry->grplo, __entry->grphi, __entry->gpevent)
  113. );
  114. /*
  115. * Tracepoint for grace-period-initialization events. These are
  116. * distinguished by the type of RCU, the new grace-period number, the
  117. * rcu_node structure level, the starting and ending CPU covered by the
  118. * rcu_node structure, and the mask of CPUs that will be waited for.
  119. * All but the type of RCU are extracted from the rcu_node structure.
  120. */
  121. TRACE_EVENT(rcu_grace_period_init,
  122. TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
  123. int grplo, int grphi, unsigned long qsmask),
  124. TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
  125. TP_STRUCT__entry(
  126. __field(const char *, rcuname)
  127. __field(unsigned long, gp_seq)
  128. __field(u8, level)
  129. __field(int, grplo)
  130. __field(int, grphi)
  131. __field(unsigned long, qsmask)
  132. ),
  133. TP_fast_assign(
  134. __entry->rcuname = rcuname;
  135. __entry->gp_seq = gp_seq;
  136. __entry->level = level;
  137. __entry->grplo = grplo;
  138. __entry->grphi = grphi;
  139. __entry->qsmask = qsmask;
  140. ),
  141. TP_printk("%s %lu %u %d %d %lx",
  142. __entry->rcuname, __entry->gp_seq, __entry->level,
  143. __entry->grplo, __entry->grphi, __entry->qsmask)
  144. );
  145. /*
  146. * Tracepoint for expedited grace-period events. Takes a string identifying
  147. * the RCU flavor, the expedited grace-period sequence number, and a string
  148. * identifying the grace-period-related event as follows:
  149. *
  150. * "snap": Captured snapshot of expedited grace period sequence number.
  151. * "start": Started a real expedited grace period.
  152. * "reset": Started resetting the tree
  153. * "select": Started selecting the CPUs to wait on.
  154. * "selectofl": Selected CPU partially offline.
  155. * "startwait": Started waiting on selected CPUs.
  156. * "end": Ended a real expedited grace period.
  157. * "endwake": Woke piggybackers up.
  158. * "done": Someone else did the expedited grace period for us.
  159. */
  160. TRACE_EVENT(rcu_exp_grace_period,
  161. TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
  162. TP_ARGS(rcuname, gpseq, gpevent),
  163. TP_STRUCT__entry(
  164. __field(const char *, rcuname)
  165. __field(unsigned long, gpseq)
  166. __field(const char *, gpevent)
  167. ),
  168. TP_fast_assign(
  169. __entry->rcuname = rcuname;
  170. __entry->gpseq = gpseq;
  171. __entry->gpevent = gpevent;
  172. ),
  173. TP_printk("%s %lu %s",
  174. __entry->rcuname, __entry->gpseq, __entry->gpevent)
  175. );
  176. /*
  177. * Tracepoint for expedited grace-period funnel-locking events. Takes a
  178. * string identifying the RCU flavor, an integer identifying the rcu_node
  179. * combining-tree level, another pair of integers identifying the lowest-
  180. * and highest-numbered CPU associated with the current rcu_node structure,
  181. * and a string. identifying the grace-period-related event as follows:
  182. *
  183. * "nxtlvl": Advance to next level of rcu_node funnel
  184. * "wait": Wait for someone else to do expedited GP
  185. */
  186. TRACE_EVENT(rcu_exp_funnel_lock,
  187. TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
  188. const char *gpevent),
  189. TP_ARGS(rcuname, level, grplo, grphi, gpevent),
  190. TP_STRUCT__entry(
  191. __field(const char *, rcuname)
  192. __field(u8, level)
  193. __field(int, grplo)
  194. __field(int, grphi)
  195. __field(const char *, gpevent)
  196. ),
  197. TP_fast_assign(
  198. __entry->rcuname = rcuname;
  199. __entry->level = level;
  200. __entry->grplo = grplo;
  201. __entry->grphi = grphi;
  202. __entry->gpevent = gpevent;
  203. ),
  204. TP_printk("%s %d %d %d %s",
  205. __entry->rcuname, __entry->level, __entry->grplo,
  206. __entry->grphi, __entry->gpevent)
  207. );
  208. #ifdef CONFIG_RCU_NOCB_CPU
  209. /*
  210. * Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
  211. * to assist debugging of these handoffs.
  212. *
  213. * The first argument is the name of the RCU flavor, and the second is
  214. * the number of the offloaded CPU are extracted. The third and final
  215. * argument is a string as follows:
  216. *
  217. * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
  218. * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
  219. * "WakeOvf": Wake rcuo kthread, CB list is huge.
  220. * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
  221. * "WakeNot": Don't wake rcuo kthread.
  222. * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
  223. * "DeferredWake": Carried out the "IsDeferred" wakeup.
  224. * "Poll": Start of new polling cycle for rcu_nocb_poll.
  225. * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll.
  226. * "WokeEmpty": rcuo kthread woke to find empty list.
  227. * "WokeNonEmpty": rcuo kthread woke to find non-empty list.
  228. * "WaitQueue": Enqueue partially done, timed wait for it to complete.
  229. * "WokeQueue": Partial enqueue now complete.
  230. */
  231. TRACE_EVENT(rcu_nocb_wake,
  232. TP_PROTO(const char *rcuname, int cpu, const char *reason),
  233. TP_ARGS(rcuname, cpu, reason),
  234. TP_STRUCT__entry(
  235. __field(const char *, rcuname)
  236. __field(int, cpu)
  237. __field(const char *, reason)
  238. ),
  239. TP_fast_assign(
  240. __entry->rcuname = rcuname;
  241. __entry->cpu = cpu;
  242. __entry->reason = reason;
  243. ),
  244. TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
  245. );
  246. #endif
  247. /*
  248. * Tracepoint for tasks blocking within preemptible-RCU read-side
  249. * critical sections. Track the type of RCU (which one day might
  250. * include SRCU), the grace-period number that the task is blocking
  251. * (the current or the next), and the task's PID.
  252. */
  253. TRACE_EVENT(rcu_preempt_task,
  254. TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
  255. TP_ARGS(rcuname, pid, gp_seq),
  256. TP_STRUCT__entry(
  257. __field(const char *, rcuname)
  258. __field(unsigned long, gp_seq)
  259. __field(int, pid)
  260. ),
  261. TP_fast_assign(
  262. __entry->rcuname = rcuname;
  263. __entry->gp_seq = gp_seq;
  264. __entry->pid = pid;
  265. ),
  266. TP_printk("%s %lu %d",
  267. __entry->rcuname, __entry->gp_seq, __entry->pid)
  268. );
  269. /*
  270. * Tracepoint for tasks that blocked within a given preemptible-RCU
  271. * read-side critical section exiting that critical section. Track the
  272. * type of RCU (which one day might include SRCU) and the task's PID.
  273. */
  274. TRACE_EVENT(rcu_unlock_preempted_task,
  275. TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
  276. TP_ARGS(rcuname, gp_seq, pid),
  277. TP_STRUCT__entry(
  278. __field(const char *, rcuname)
  279. __field(unsigned long, gp_seq)
  280. __field(int, pid)
  281. ),
  282. TP_fast_assign(
  283. __entry->rcuname = rcuname;
  284. __entry->gp_seq = gp_seq;
  285. __entry->pid = pid;
  286. ),
  287. TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
  288. );
  289. /*
  290. * Tracepoint for quiescent-state-reporting events. These are
  291. * distinguished by the type of RCU, the grace-period number, the
  292. * mask of quiescent lower-level entities, the rcu_node structure level,
  293. * the starting and ending CPU covered by the rcu_node structure, and
  294. * whether there are any blocked tasks blocking the current grace period.
  295. * All but the type of RCU are extracted from the rcu_node structure.
  296. */
  297. TRACE_EVENT(rcu_quiescent_state_report,
  298. TP_PROTO(const char *rcuname, unsigned long gp_seq,
  299. unsigned long mask, unsigned long qsmask,
  300. u8 level, int grplo, int grphi, int gp_tasks),
  301. TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
  302. TP_STRUCT__entry(
  303. __field(const char *, rcuname)
  304. __field(unsigned long, gp_seq)
  305. __field(unsigned long, mask)
  306. __field(unsigned long, qsmask)
  307. __field(u8, level)
  308. __field(int, grplo)
  309. __field(int, grphi)
  310. __field(u8, gp_tasks)
  311. ),
  312. TP_fast_assign(
  313. __entry->rcuname = rcuname;
  314. __entry->gp_seq = gp_seq;
  315. __entry->mask = mask;
  316. __entry->qsmask = qsmask;
  317. __entry->level = level;
  318. __entry->grplo = grplo;
  319. __entry->grphi = grphi;
  320. __entry->gp_tasks = gp_tasks;
  321. ),
  322. TP_printk("%s %lu %lx>%lx %u %d %d %u",
  323. __entry->rcuname, __entry->gp_seq,
  324. __entry->mask, __entry->qsmask, __entry->level,
  325. __entry->grplo, __entry->grphi, __entry->gp_tasks)
  326. );
  327. /*
  328. * Tracepoint for quiescent states detected by force_quiescent_state().
  329. * These trace events include the type of RCU, the grace-period number
  330. * that was blocked by the CPU, the CPU itself, and the type of quiescent
  331. * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
  332. * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
  333. * CPU got a quiescent state via its rcu_qs_ctr.
  334. */
  335. TRACE_EVENT(rcu_fqs,
  336. TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
  337. TP_ARGS(rcuname, gp_seq, cpu, qsevent),
  338. TP_STRUCT__entry(
  339. __field(const char *, rcuname)
  340. __field(unsigned long, gp_seq)
  341. __field(int, cpu)
  342. __field(const char *, qsevent)
  343. ),
  344. TP_fast_assign(
  345. __entry->rcuname = rcuname;
  346. __entry->gp_seq = gp_seq;
  347. __entry->cpu = cpu;
  348. __entry->qsevent = qsevent;
  349. ),
  350. TP_printk("%s %lu %d %s",
  351. __entry->rcuname, __entry->gp_seq,
  352. __entry->cpu, __entry->qsevent)
  353. );
  354. #endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) */
  355. /*
  356. * Tracepoint for dyntick-idle entry/exit events. These take a string
  357. * as argument: "Start" for entering dyntick-idle mode, "Startirq" for
  358. * entering it from irq/NMI, "End" for leaving it, "Endirq" for leaving it
  359. * to irq/NMI, "--=" for events moving towards idle, and "++=" for events
  360. * moving away from idle.
  361. *
  362. * These events also take a pair of numbers, which indicate the nesting
  363. * depth before and after the event of interest, and a third number that is
  364. * the ->dynticks counter. Note that task-related and interrupt-related
  365. * events use two separate counters, and that the "++=" and "--=" events
  366. * for irq/NMI will change the counter by two, otherwise by one.
  367. */
  368. TRACE_EVENT(rcu_dyntick,
  369. TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
  370. TP_ARGS(polarity, oldnesting, newnesting, dynticks),
  371. TP_STRUCT__entry(
  372. __field(const char *, polarity)
  373. __field(long, oldnesting)
  374. __field(long, newnesting)
  375. __field(int, dynticks)
  376. ),
  377. TP_fast_assign(
  378. __entry->polarity = polarity;
  379. __entry->oldnesting = oldnesting;
  380. __entry->newnesting = newnesting;
  381. __entry->dynticks = atomic_read(&dynticks);
  382. ),
  383. TP_printk("%s %lx %lx %#3x", __entry->polarity,
  384. __entry->oldnesting, __entry->newnesting,
  385. __entry->dynticks & 0xfff)
  386. );
  387. /*
  388. * Tracepoint for the registration of a single RCU callback function.
  389. * The first argument is the type of RCU, the second argument is
  390. * a pointer to the RCU callback itself, the third element is the
  391. * number of lazy callbacks queued, and the fourth element is the
  392. * total number of callbacks queued.
  393. */
  394. TRACE_EVENT(rcu_callback,
  395. TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
  396. long qlen),
  397. TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
  398. TP_STRUCT__entry(
  399. __field(const char *, rcuname)
  400. __field(void *, rhp)
  401. __field(void *, func)
  402. __field(long, qlen_lazy)
  403. __field(long, qlen)
  404. ),
  405. TP_fast_assign(
  406. __entry->rcuname = rcuname;
  407. __entry->rhp = rhp;
  408. __entry->func = rhp->func;
  409. __entry->qlen_lazy = qlen_lazy;
  410. __entry->qlen = qlen;
  411. ),
  412. TP_printk("%s rhp=%p func=%pf %ld/%ld",
  413. __entry->rcuname, __entry->rhp, __entry->func,
  414. __entry->qlen_lazy, __entry->qlen)
  415. );
  416. /*
  417. * Tracepoint for the registration of a single RCU callback of the special
  418. * kfree() form. The first argument is the RCU type, the second argument
  419. * is a pointer to the RCU callback, the third argument is the offset
  420. * of the callback within the enclosing RCU-protected data structure,
  421. * the fourth argument is the number of lazy callbacks queued, and the
  422. * fifth argument is the total number of callbacks queued.
  423. */
  424. TRACE_EVENT(rcu_kfree_callback,
  425. TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
  426. long qlen_lazy, long qlen),
  427. TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
  428. TP_STRUCT__entry(
  429. __field(const char *, rcuname)
  430. __field(void *, rhp)
  431. __field(unsigned long, offset)
  432. __field(long, qlen_lazy)
  433. __field(long, qlen)
  434. ),
  435. TP_fast_assign(
  436. __entry->rcuname = rcuname;
  437. __entry->rhp = rhp;
  438. __entry->offset = offset;
  439. __entry->qlen_lazy = qlen_lazy;
  440. __entry->qlen = qlen;
  441. ),
  442. TP_printk("%s rhp=%p func=%ld %ld/%ld",
  443. __entry->rcuname, __entry->rhp, __entry->offset,
  444. __entry->qlen_lazy, __entry->qlen)
  445. );
  446. /*
  447. * Tracepoint for marking the beginning rcu_do_batch, performed to start
  448. * RCU callback invocation. The first argument is the RCU flavor,
  449. * the second is the number of lazy callbacks queued, the third is
  450. * the total number of callbacks queued, and the fourth argument is
  451. * the current RCU-callback batch limit.
  452. */
  453. TRACE_EVENT(rcu_batch_start,
  454. TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
  455. TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
  456. TP_STRUCT__entry(
  457. __field(const char *, rcuname)
  458. __field(long, qlen_lazy)
  459. __field(long, qlen)
  460. __field(long, blimit)
  461. ),
  462. TP_fast_assign(
  463. __entry->rcuname = rcuname;
  464. __entry->qlen_lazy = qlen_lazy;
  465. __entry->qlen = qlen;
  466. __entry->blimit = blimit;
  467. ),
  468. TP_printk("%s CBs=%ld/%ld bl=%ld",
  469. __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
  470. __entry->blimit)
  471. );
  472. /*
  473. * Tracepoint for the invocation of a single RCU callback function.
  474. * The first argument is the type of RCU, and the second argument is
  475. * a pointer to the RCU callback itself.
  476. */
  477. TRACE_EVENT(rcu_invoke_callback,
  478. TP_PROTO(const char *rcuname, struct rcu_head *rhp),
  479. TP_ARGS(rcuname, rhp),
  480. TP_STRUCT__entry(
  481. __field(const char *, rcuname)
  482. __field(void *, rhp)
  483. __field(void *, func)
  484. ),
  485. TP_fast_assign(
  486. __entry->rcuname = rcuname;
  487. __entry->rhp = rhp;
  488. __entry->func = rhp->func;
  489. ),
  490. TP_printk("%s rhp=%p func=%pf",
  491. __entry->rcuname, __entry->rhp, __entry->func)
  492. );
  493. /*
  494. * Tracepoint for the invocation of a single RCU callback of the special
  495. * kfree() form. The first argument is the RCU flavor, the second
  496. * argument is a pointer to the RCU callback, and the third argument
  497. * is the offset of the callback within the enclosing RCU-protected
  498. * data structure.
  499. */
  500. TRACE_EVENT(rcu_invoke_kfree_callback,
  501. TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
  502. TP_ARGS(rcuname, rhp, offset),
  503. TP_STRUCT__entry(
  504. __field(const char *, rcuname)
  505. __field(void *, rhp)
  506. __field(unsigned long, offset)
  507. ),
  508. TP_fast_assign(
  509. __entry->rcuname = rcuname;
  510. __entry->rhp = rhp;
  511. __entry->offset = offset;
  512. ),
  513. TP_printk("%s rhp=%p func=%ld",
  514. __entry->rcuname, __entry->rhp, __entry->offset)
  515. );
  516. /*
  517. * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
  518. * invoked. The first argument is the name of the RCU flavor,
  519. * the second argument is number of callbacks actually invoked,
  520. * the third argument (cb) is whether or not any of the callbacks that
  521. * were ready to invoke at the beginning of this batch are still
  522. * queued, the fourth argument (nr) is the return value of need_resched(),
  523. * the fifth argument (iit) is 1 if the current task is the idle task,
  524. * and the sixth argument (risk) is the return value from
  525. * rcu_is_callbacks_kthread().
  526. */
  527. TRACE_EVENT(rcu_batch_end,
  528. TP_PROTO(const char *rcuname, int callbacks_invoked,
  529. char cb, char nr, char iit, char risk),
  530. TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
  531. TP_STRUCT__entry(
  532. __field(const char *, rcuname)
  533. __field(int, callbacks_invoked)
  534. __field(char, cb)
  535. __field(char, nr)
  536. __field(char, iit)
  537. __field(char, risk)
  538. ),
  539. TP_fast_assign(
  540. __entry->rcuname = rcuname;
  541. __entry->callbacks_invoked = callbacks_invoked;
  542. __entry->cb = cb;
  543. __entry->nr = nr;
  544. __entry->iit = iit;
  545. __entry->risk = risk;
  546. ),
  547. TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
  548. __entry->rcuname, __entry->callbacks_invoked,
  549. __entry->cb ? 'C' : '.',
  550. __entry->nr ? 'S' : '.',
  551. __entry->iit ? 'I' : '.',
  552. __entry->risk ? 'R' : '.')
  553. );
  554. /*
  555. * Tracepoint for rcutorture readers. The first argument is the name
  556. * of the RCU flavor from rcutorture's viewpoint and the second argument
  557. * is the callback address. The third argument is the start time in
  558. * seconds, and the last two arguments are the grace period numbers
  559. * at the beginning and end of the read, respectively. Note that the
  560. * callback address can be NULL.
  561. */
  562. #define RCUTORTURENAME_LEN 8
  563. TRACE_EVENT(rcu_torture_read,
  564. TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
  565. unsigned long secs, unsigned long c_old, unsigned long c),
  566. TP_ARGS(rcutorturename, rhp, secs, c_old, c),
  567. TP_STRUCT__entry(
  568. __field(char, rcutorturename[RCUTORTURENAME_LEN])
  569. __field(struct rcu_head *, rhp)
  570. __field(unsigned long, secs)
  571. __field(unsigned long, c_old)
  572. __field(unsigned long, c)
  573. ),
  574. TP_fast_assign(
  575. strncpy(__entry->rcutorturename, rcutorturename,
  576. RCUTORTURENAME_LEN);
  577. __entry->rcutorturename[RCUTORTURENAME_LEN - 1] = 0;
  578. __entry->rhp = rhp;
  579. __entry->secs = secs;
  580. __entry->c_old = c_old;
  581. __entry->c = c;
  582. ),
  583. TP_printk("%s torture read %p %luus c: %lu %lu",
  584. __entry->rcutorturename, __entry->rhp,
  585. __entry->secs, __entry->c_old, __entry->c)
  586. );
  587. /*
  588. * Tracepoint for _rcu_barrier() execution. The string "s" describes
  589. * the _rcu_barrier phase:
  590. * "Begin": _rcu_barrier() started.
  591. * "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
  592. * "Inc1": _rcu_barrier() piggyback check counter incremented.
  593. * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
  594. * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU.
  595. * "OnlineQ": _rcu_barrier() found online CPU with callbacks.
  596. * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
  597. * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
  598. * "IRQNQ": An rcu_barrier_callback() callback found no callbacks.
  599. * "CB": An rcu_barrier_callback() invoked a callback, not the last.
  600. * "LastCB": An rcu_barrier_callback() invoked the last callback.
  601. * "Inc2": _rcu_barrier() piggyback check counter incremented.
  602. * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
  603. * is the count of remaining callbacks, and "done" is the piggybacking count.
  604. */
  605. TRACE_EVENT(rcu_barrier,
  606. TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
  607. TP_ARGS(rcuname, s, cpu, cnt, done),
  608. TP_STRUCT__entry(
  609. __field(const char *, rcuname)
  610. __field(const char *, s)
  611. __field(int, cpu)
  612. __field(int, cnt)
  613. __field(unsigned long, done)
  614. ),
  615. TP_fast_assign(
  616. __entry->rcuname = rcuname;
  617. __entry->s = s;
  618. __entry->cpu = cpu;
  619. __entry->cnt = cnt;
  620. __entry->done = done;
  621. ),
  622. TP_printk("%s %s cpu %d remaining %d # %lu",
  623. __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
  624. __entry->done)
  625. );
  626. #else /* #ifdef CONFIG_RCU_TRACE */
  627. #define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
  628. #define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
  629. level, grplo, grphi, event) \
  630. do { } while (0)
  631. #define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
  632. qsmask) do { } while (0)
  633. #define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
  634. do { } while (0)
  635. #define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
  636. do { } while (0)
  637. #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
  638. #define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
  639. #define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
  640. #define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
  641. grplo, grphi, gp_tasks) do { } \
  642. while (0)
  643. #define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
  644. #define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
  645. #define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
  646. #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
  647. do { } while (0)
  648. #define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
  649. do { } while (0)
  650. #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
  651. #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
  652. #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
  653. do { } while (0)
  654. #define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
  655. do { } while (0)
  656. #define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
  657. #endif /* #else #ifdef CONFIG_RCU_TRACE */
  658. #endif /* _TRACE_RCU_H */
  659. /* This part must be outside protection */
  660. #include <trace/define_trace.h>