/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, 2018 Oracle. All rights reserved.
 *
 * Trace point definitions for the "rpcrdma" subsystem.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma

#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H

#include <linux/tracepoint.h>
#include <trace/events/rdma.h>
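
/*
 * Like every trace header, this file is deliberately included more
 * than once (note TRACE_HEADER_MULTI_READ in the guard above), so it
 * must contain only event definitions and the macros that build them.
 *
 * Once built in, the events below can be enabled from user space,
 * assuming tracefs is mounted at its usual location:
 *
 *	echo 1 > /sys/kernel/tracing/events/rpcrdma/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */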

/**
 ** Event classes
 **/

DECLARE_EVENT_CLASS(xprtrdma_reply_event,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(const void *, r_xprt)
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
	),

	TP_fast_assign(
		__entry->rep = rep;
		__entry->r_xprt = rep->rr_rxprt;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
	),

	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
		__entry->r_xprt, __entry->xid, __entry->rep,
		__entry->version, __entry->proc
	)
);

#define DEFINE_REPLY_EVENT(name) \
		DEFINE_EVENT(xprtrdma_reply_event, name, \
				TP_PROTO( \
					const struct rpcrdma_rep *rep \
				), \
				TP_ARGS(rep))
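
/*
 * DECLARE_EVENT_CLASS() emits the trace record layout and print
 * format only once; each DEFINE_EVENT() wrapper then stamps out a
 * named tracepoint sharing that class, which costs less image size
 * than defining a separate TRACE_EVENT() for every event.
 */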

DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

#define DEFINE_RXPRT_EVENT(name) \
		DEFINE_EVENT(xprtrdma_rxprt, name, \
				TP_PROTO( \
					const struct rpcrdma_xprt *r_xprt \
				), \
				TP_ARGS(r_xprt))

DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, mr)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr = mr;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id, __entry->mr,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_RDCH_EVENT(name) \
		DEFINE_EVENT(xprtrdma_rdch_event, name, \
				TP_PROTO( \
					const struct rpc_task *task, \
					unsigned int pos, \
					struct rpcrdma_mr *mr, \
					int nsegs \
				), \
				TP_ARGS(task, pos, mr, nsegs))

DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, mr)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr = mr;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id, __entry->mr,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_WRCH_EVENT(name) \
		DEFINE_EVENT(xprtrdma_wrch_event, name, \
				TP_PROTO( \
					const struct rpc_task *task, \
					struct rpcrdma_mr *mr, \
					int nsegs \
				), \
				TP_ARGS(task, mr, nsegs))

TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
TRACE_DEFINE_ENUM(FRWR_IS_VALID);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);

#define xprtrdma_show_frwr_state(x) \
		__print_symbolic(x, \
				{ FRWR_IS_INVALID, "INVALID" }, \
				{ FRWR_IS_VALID, "VALID" }, \
				{ FRWR_FLUSHED_FR, "FLUSHED_FR" }, \
				{ FRWR_FLUSHED_LI, "FLUSHED_LI" })
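
/*
 * TRACE_DEFINE_ENUM() exports the numeric value of each enum constant
 * to the trace format files, so that user-space tools can resolve the
 * names that __print_symbolic() records in the binary trace data.
 */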

DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpcrdma_frwr *frwr
	),

	TP_ARGS(wc, frwr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(unsigned int, state)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
		__entry->state = frwr->fr_state;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk(
		"mr=%p state=%s: %s (%u/0x%x)",
		__entry->mr, xprtrdma_show_frwr_state(__entry->state),
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_FRWR_DONE_EVENT(name) \
		DEFINE_EVENT(xprtrdma_frwr_done, name, \
				TP_PROTO( \
					const struct ib_wc *wc, \
					const struct rpcrdma_frwr *frwr \
				), \
				TP_ARGS(wc, frwr))

DECLARE_EVENT_CLASS(xprtrdma_mr,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_MR_EVENT(name) \
		DEFINE_EVENT(xprtrdma_mr, name, \
				TP_PROTO( \
					const struct rpcrdma_mr *mr \
				), \
				TP_ARGS(mr))

DECLARE_EVENT_CLASS(xprtrdma_cb_event,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(const void *, rqst)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->rqst = rqst;
		__entry->req = rpcr_to_rdmar(rqst);
		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
		__entry->xid, __entry->rqst, __entry->req, __entry->rep
	)
);

#define DEFINE_CB_EVENT(name) \
		DEFINE_EVENT(xprtrdma_cb_event, name, \
				TP_PROTO( \
					const struct rpc_rqst *rqst \
				), \
				TP_ARGS(rqst))

/**
 ** Connection events
 **/

TRACE_EVENT(xprtrdma_conn_upcall,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		struct rdma_cm_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__entry->status = event->status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);

TRACE_EVENT(xprtrdma_disconnect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int status
	),

	TP_ARGS(r_xprt, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, status)
		__field(int, connected)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->status = status;
		__entry->connected = r_xprt->rx_ep.rep_connected;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->status,
		__entry->connected == 1 ? "still " : "dis"
	)
);

DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);

TRACE_EVENT(xprtrdma_qp_error,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct ib_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__string(name, event->device->name)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__assign_str(name, event->device->name);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__get_str(name), rdma_show_ib_event(__entry->event),
		__entry->event
	)
);

/**
 ** Call events
 **/

TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
	),

	TP_printk("r_xprt=%p: created %u MRs",
		__entry->r_xprt, __entry->count
	)
);

DEFINE_RXPRT_EVENT(xprtrdma_nomrs);

DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);

TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x) \
		__print_symbolic(x, \
				{ rpcrdma_noch, "inline" }, \
				{ rpcrdma_readch, "read list" }, \
				{ rpcrdma_areadch, "*read list" }, \
				{ rpcrdma_writech, "write list" }, \
				{ rpcrdma_replych, "reply chunk" })
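
/*
 * These values report how the send path marshaled an RPC: with no
 * chunks ("inline"), or via a Read list, Write list, or Reply chunk
 * as defined by the RPC-over-RDMA version 1 protocol (RFC 8166).
 */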

TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned int hdrlen,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(rqst, hdrlen, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = hdrlen;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);

TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(int, num_sge)
		__field(bool, signaled)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->req = req;
		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
				    IB_SEND_SIGNALED;
		__entry->status = status;
	),

	TP_printk("req=%p, %d SGEs%s, status=%d",
		__entry->req, __entry->num_sge,
		(__entry->signaled ? ", signaled" : ""),
		__entry->status
	)
);

TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct ib_cqe *cqe
	),

	TP_ARGS(cqe),

	TP_STRUCT__entry(
		__field(const void *, cqe)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
	),

	TP_printk("cqe=%p",
		__entry->cqe
	)
);

TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_buf.rb_posted_receives;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);

/**
 ** Completion events
 **/

TRACE_EVENT(xprtrdma_wc_send,
	TP_PROTO(
		const struct rpcrdma_sendctx *sc,
		const struct ib_wc *wc
	),

	TP_ARGS(sc, wc),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, unmap_count)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->req = sc->sc_req;
		__entry->unmap_count = sc->sc_unmap_count;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
		__entry->req, __entry->unmap_count,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

TRACE_EVENT(xprtrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);

DEFINE_MR_EVENT(xprtrdma_localinv);
DEFINE_MR_EVENT(xprtrdma_dma_map);
DEFINE_MR_EVENT(xprtrdma_dma_unmap);
DEFINE_MR_EVENT(xprtrdma_remoteinv);
DEFINE_MR_EVENT(xprtrdma_recover_mr);

/**
 ** Reply events
 **/

TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		const struct rpcrdma_req *req,
		unsigned int credits
	),

	TP_ARGS(task, rep, req, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->req = req;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits, __entry->rep, __entry->req
	)
);

TRACE_EVENT(xprtrdma_defer_cmp,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->xid = be32_to_cpu(rep->rr_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
DEFINE_REPLY_EVENT(xprtrdma_reply_short);
DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);

TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int len,
		int hdrlen
	),

	TP_ARGS(rqst, len, hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, base)
		__field(int, len)
		__field(int, hdrlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
		__entry->len = len;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->base, __entry->len, __entry->hdrlen
	)
);

TRACE_EVENT(xprtrdma_fixup_pg,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int pageno,
		const void *pos,
		int len,
		int curlen
	),

	TP_ARGS(rqst, pageno, pos, len, curlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, pos)
		__field(int, pageno)
		__field(int, len)
		__field(int, curlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->pageno = pageno;
		__entry->len = len;
		__entry->curlen = curlen;
	),

	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
	)
);

TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

/**
 ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
 **/

TRACE_EVENT(xprtrdma_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(size_t, callsize)
		__field(size_t, rcvsize)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->callsize = task->tk_rqstp->rq_callsize;
		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
	),

	TP_printk("task:%u@%u req=%p (%zu, %zu)",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->callsize, __entry->rcvsize
	)
);

TRACE_EVENT(xprtrdma_rpc_done,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->rep = req->rl_reply;
	),

	TP_printk("task:%u@%u req=%p rep=%p",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->rep
	)
);

/**
 ** Callback events
 **/

TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);

DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);

TRACE_EVENT(xprtrdma_leaked_rep,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rqst, rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->rep = rep;
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

/**
 ** Server-side RPC/RDMA events
 **/

DECLARE_EVENT_CLASS(svcrdma_xprt_event,
	TP_PROTO(
		const struct svc_xprt *xprt
	),

	TP_ARGS(xprt),

	TP_STRUCT__entry(
		__field(const void *, xprt)
		__string(addr, xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->xprt = xprt;
		__assign_str(addr, xprt->xpt_remotebuf);
	),

	TP_printk("xprt=%p addr=%s",
		__entry->xprt, __get_str(addr)
	)
);

#define DEFINE_XPRT_EVENT(name) \
		DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \
				TP_PROTO( \
					const struct svc_xprt *xprt \
				), \
				TP_ARGS(xprt))

DEFINE_XPRT_EVENT(accept);
DEFINE_XPRT_EVENT(fail);
DEFINE_XPRT_EVENT(free);

TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x) \
		__print_symbolic(x, \
				{ RDMA_MSG, "RDMA_MSG" }, \
				{ RDMA_NOMSG, "RDMA_NOMSG" }, \
				{ RDMA_MSGP, "RDMA_MSGP" }, \
				{ RDMA_DONE, "RDMA_DONE" }, \
				{ RDMA_ERROR, "RDMA_ERROR" })
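
/*
 * The values decoded here are the RPC-over-RDMA version 1 message
 * types defined in RFC 8166.
 */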

TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);

TRACE_EVENT(svcrdma_decode_short,
	TP_PROTO(
		unsigned int hdrlen
	),

	TP_ARGS(hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->hdrlen = hdrlen;
	),

	TP_printk("hdrlen=%u", __entry->hdrlen)
);

DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		__be32 *p
	),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

#define DEFINE_BADREQ_EVENT(name) \
		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name, \
				TP_PROTO( \
					__be32 *p \
				), \
				TP_ARGS(p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);

DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_SEGMENT_EVENT(name) \
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name, \
				TP_PROTO( \
					u32 handle, \
					u32 length, \
					u64 offset \
				), \
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(rseg);
DEFINE_SEGMENT_EVENT(wseg);

DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);

#define DEFINE_CHUNK_EVENT(name) \
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name, \
				TP_PROTO( \
					u32 length \
				), \
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(pzr);
DEFINE_CHUNK_EVENT(write);
DEFINE_CHUNK_EVENT(reply);

TRACE_EVENT(svcrdma_encode_read,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);

DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

#define DEFINE_ERROR_EVENT(name) \
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \
				TP_PROTO( \
					__be32 xid \
				), \
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);

/**
 ** Server-side RDMA API events
 **/

TRACE_EVENT(svcrdma_dma_map_page,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const void *page
	),

	TP_ARGS(rdma, page),

	TP_STRUCT__entry(
		__field(const void *, page)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->page = page;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s page=%p",
		__get_str(addr), __get_str(device), __entry->page
	)
);

TRACE_EVENT(svcrdma_dma_map_rwctx,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s status=%d",
		__get_str(addr), __get_str(device), __entry->status
	)
);

TRACE_EVENT(svcrdma_send_failed,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__field(const void *, xprt)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__entry->xprt = rqst->rq_xprt;
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
		__entry->xprt, __get_str(addr),
		__entry->xid, __entry->status
	)
);

DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cqe=%p status=%s (%u/0x%x)",
		__entry->cqe, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_SENDCOMP_EVENT(name) \
		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \
				TP_PROTO( \
					const struct ib_wc *wc \
				), \
				TP_ARGS(wc))

TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct ib_send_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
		__entry->status = status;
	),

	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
		__entry->cqe, __entry->num_sge,
		__entry->inv_rkey, __entry->status
	)
);

DEFINE_SENDCOMP_EVENT(send);

TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct ib_recv_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->status = status;
	),

	TP_printk("cqe=%p status=%d",
		__entry->cqe, __entry->status
	)
);

TRACE_EVENT(svcrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

TRACE_EVENT(svcrdma_post_rw,
	TP_PROTO(
		const void *cqe,
		int sqecount,
		int status
	),

	TP_ARGS(cqe, sqecount, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, sqecount)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
		__entry->sqecount = sqecount;
		__entry->status = status;
	),

	TP_printk("cqe=%p sqecount=%d status=%d",
		__entry->cqe, __entry->sqecount, __entry->status
	)
);

DEFINE_SENDCOMP_EVENT(read);
DEFINE_SENDCOMP_EVENT(write);

TRACE_EVENT(svcrdma_cm_event,
	TP_PROTO(
		const struct rdma_cm_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__field(int, status)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__entry->status = event->status;
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s event=%s (%u/%d)",
		__entry->addr,
		rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);

TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);

DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

#define DEFINE_SQ_EVENT(name) \
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma \
				), \
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);

#endif /* _TRACE_RPCRDMA_H */
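
/*
 * This include must come last: trace/define_trace.h expands the event
 * definitions above into the actual tracepoint code.
 */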
#include <trace/define_trace.h>