/* trace_tx.h */
/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/*
 * Trace-event header for the hfi1 TX path.
 *
 * Like all ftrace event headers this file is intentionally multi-included:
 * the guard is bypassed when TRACE_HEADER_MULTI_READ is set so that
 * <trace/define_trace.h> (included at the bottom) can re-expand the
 * TRACE_EVENT() definitions into their implementations.
 */
#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_TX_H

#include <linux/tracepoint.h>
#include <linux/trace_seq.h>

#include "hfi.h"
#include "mad.h"
#include "sdma.h"

/* Renders SDMA descriptor flag bits as a string into the trace_seq. */
const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);

/* Helper for TP_printk(); 'p' is the implicit trace_seq in that scope. */
#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)

#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_tx
/*
 * hfi1_piofree - PIO send-context credit return.
 * Records the context's software/hardware indices and the 'extra' value
 * supplied by the caller.
 */
TRACE_EVENT(hfi1_piofree,
	    TP_PROTO(struct send_context *sc, int extra),
	    TP_ARGS(sc, extra),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
			     __field(u32, sw_index)
			     __field(u32, hw_context)
			     __field(int, extra)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
			   __entry->sw_index = sc->sw_index;
			   __entry->hw_context = sc->hw_context;
			   __entry->extra = extra;
			   ),
	    TP_printk("[%s] ctxt %u(%u) extra %d",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->extra
		      )
);
/*
 * hfi1_wantpiointr - PIO credit-return interrupt enable/disable request.
 * 'needint' is the requested interrupt state; 'credit_ctrl' is the raw
 * credit control register value being traced.
 */
TRACE_EVENT(hfi1_wantpiointr,
	    TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
	    TP_ARGS(sc, needint, credit_ctrl),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
			     __field(u32, sw_index)
			     __field(u32, hw_context)
			     __field(u32, needint)
			     __field(u64, credit_ctrl)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
			   __entry->sw_index = sc->sw_index;
			   __entry->hw_context = sc->hw_context;
			   __entry->needint = needint;
			   __entry->credit_ctrl = credit_ctrl;
			   ),
	    TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->needint,
		      (unsigned long long)__entry->credit_ctrl
		      )
);
/*
 * Common template for QP sleep/wakeup events: captures the QP number,
 * the caller-supplied flags, and the QP's current s_flags.
 */
DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
		    TP_PROTO(struct rvt_qp *qp, u32 flags),
		    TP_ARGS(qp, flags),
		    TP_STRUCT__entry(
			    DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
			    __field(u32, qpn)
			    __field(u32, flags)
			    __field(u32, s_flags)
			    ),
		    TP_fast_assign(
			    DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
			    __entry->flags = flags;
			    __entry->qpn = qp->ibqp.qp_num;
			    __entry->s_flags = qp->s_flags;
			    ),
		    TP_printk(
			    "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
			    __get_str(dev),
			    __entry->qpn,
			    __entry->flags,
			    __entry->s_flags
			    )
);

/* QP is being woken up. */
DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));

/* QP is going to sleep. */
DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));
/*
 * hfi1_sdma_descriptor - one SDMA hardware descriptor being posted.
 * desc0/desc1 are the raw 64-bit descriptor words; 'e' is the descriptor
 * queue index and 'descp' the descriptor's address.  The printk decodes
 * flags, physical address, generation, and byte count from the raw words.
 */
TRACE_EVENT(hfi1_sdma_descriptor,
	    TP_PROTO(struct sdma_engine *sde,
		     u64 desc0,
		     u64 desc1,
		     u16 e,
		     void *descp),
	    TP_ARGS(sde, desc0, desc1, e, descp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
			     __field(void *, descp)
			     __field(u64, desc0)
			     __field(u64, desc1)
			     __field(u16, e)
			     __field(u8, idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
			   __entry->desc0 = desc0;
			   __entry->desc1 = desc1;
			   __entry->idx = sde->this_idx;
			   __entry->descp = descp;
			   __entry->e = e;
			   ),
	    TP_printk(
		    "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
		    __get_str(dev),
		    __entry->idx,
		    __parse_sdma_flags(__entry->desc0, __entry->desc1),
		    (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
		    SDMA_DESC0_PHY_ADDR_MASK,
		    (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
			 SDMA_DESC1_GENERATION_MASK),
		    (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
			  SDMA_DESC0_BYTE_COUNT_MASK),
		    __entry->desc0,
		    __entry->desc1,
		    __entry->descp,
		    __entry->e
		    )
);
/*
 * hfi1_sdma_engine_select - an SDMA engine was chosen for a send.
 * 'sel' is the selector value, 'vl' the virtual lane, 'idx' the chosen
 * engine index.
 */
TRACE_EVENT(hfi1_sdma_engine_select,
	    TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
	    TP_ARGS(dd, sel, vl, idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u32, sel)
			     __field(u8, vl)
			     __field(u8, idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->sel = sel;
			   __entry->vl = vl;
			   __entry->idx = idx;
			   ),
	    TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
		      __get_str(dev),
		      __entry->idx,
		      __entry->sel,
		      __entry->vl
		      )
);
/*
 * hfi1_sdma_user_free_queues - user SDMA queues for a (ctxt, subctxt)
 * pair are being freed.
 */
TRACE_EVENT(hfi1_sdma_user_free_queues,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
	    TP_ARGS(dd, ctxt, subctxt),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   ),
	    TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt
		      )
);
/*
 * hfi1_sdma_user_process_request - a user SDMA request is being
 * processed; 'comp_idx' is the request/completion ring entry in use.
 */
TRACE_EVENT(hfi1_sdma_user_process_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
			   ),
	    TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx
		      )
);
/*
 * Template for user-SDMA events that carry a single u32 value keyed by
 * (ctxt, subctxt, comp_idx).
 */
DECLARE_EVENT_CLASS(
	hfi1_sdma_value_template,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
		 u32 value),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			 __field(u16, ctxt)
			 __field(u16, subctxt)
			 __field(u16, comp_idx)
			 __field(u32, value)
			 ),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		       __entry->ctxt = ctxt;
		       __entry->subctxt = subctxt;
		       __entry->comp_idx = comp_idx;
		       __entry->value = value;
		       ),
	TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
		  __get_str(dev),
		  __entry->ctxt,
		  __entry->subctxt,
		  __entry->comp_idx,
		  __entry->value
		  )
);

/* Initial TID offset for a user SDMA request. */
DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 tidoffset),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));

/* Total data length of a user SDMA request. */
DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

/* Computed per-packet data length for a user SDMA request. */
DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
/*
 * hfi1_sdma_user_tid_info - TID parameters for a user SDMA request:
 * byte offset, unit count, and offset-multiplier shift ("om").
 */
TRACE_EVENT(hfi1_sdma_user_tid_info,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
			     __field(u32, tidoffset)
			     __field(u32, units)
			     __field(u8, shift)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
			   __entry->tidoffset = tidoffset;
			   __entry->units = units;
			   __entry->shift = shift;
			   ),
	    TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx,
		      __entry->tidoffset,
		      __entry->units,
		      __entry->shift
		      )
);
/*
 * hfi1_sdma_request - a user SDMA request arrived from (ctxt, subctxt);
 * 'dim' is the iovec count supplied with the request.
 */
TRACE_EVENT(hfi1_sdma_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     unsigned long dim),
	    TP_ARGS(dd, ctxt, subctxt, dim),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(unsigned long, dim)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->dim = dim;
			   ),
	    TP_printk("[%s] SDMA from %u:%u (%lu)",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->dim
		      )
);
/*
 * Template for SDMA engine events that carry the engine index and a raw
 * 64-bit status value.
 */
DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
		    TP_PROTO(struct sdma_engine *sde, u64 status),
		    TP_ARGS(sde, status),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
				     __field(u64, status)
				     __field(u8, idx)
				     ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
				   __entry->status = status;
				   __entry->idx = sde->this_idx;
				   ),
		    TP_printk("[%s] SDE(%u) status %llx",
			      __get_str(dev),
			      __entry->idx,
			      (unsigned long long)__entry->status
			      )
);

/* SDMA engine interrupt with the given status. */
DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status)
);

/* SDMA engine progress check with the given status. */
DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status)
);
/*
 * Template for AHG (automatic header generation) slot allocate/deallocate
 * events; 'aidx' is the AHG slot index on engine 'idx'.
 */
DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
		    TP_PROTO(struct sdma_engine *sde, int aidx),
		    TP_ARGS(sde, aidx),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
				     __field(int, aidx)
				     __field(u8, idx)
				     ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
				   __entry->idx = sde->this_idx;
				   __entry->aidx = aidx;
				   ),
		    TP_printk("[%s] SDE(%u) aidx %d",
			      __get_str(dev),
			      __entry->idx,
			      __entry->aidx
			      )
);

/* AHG slot allocated. */
DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));

/* AHG slot deallocated. */
DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));
/*
 * hfi1_sdma_progress - SDMA ring progress snapshot.
 *
 * Two variants: when CONFIG_HFI1_DEBUG_SDMA_ORDER is set the event also
 * records the txreq sequence number ('sn') for ordering debug; otherwise
 * the sn field is omitted.  'txnext'/'sn' fall back to ~0 when no txreq
 * is pending.
 */
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead,
		     u16 swhead,
		     struct sdma_txreq *txp
		     ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
			     __field(u64, sn)
			     __field(u16, hwhead)
			     __field(u16, swhead)
			     __field(u16, txnext)
			     __field(u16, tx_tail)
			     __field(u16, tx_head)
			     __field(u8, idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
			   __entry->hwhead = hwhead;
			   __entry->swhead = swhead;
			   __entry->tx_tail = sde->tx_tail;
			   __entry->tx_head = sde->tx_head;
			   __entry->txnext = txp ? txp->next_descq_idx : ~0;
			   __entry->idx = sde->this_idx;
			   __entry->sn = txp ? txp->sn : ~0;
			   ),
	    TP_printk(
		    "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
		    __get_str(dev),
		    __entry->idx,
		    __entry->sn,
		    __entry->hwhead,
		    __entry->swhead,
		    __entry->txnext,
		    __entry->tx_head,
		    __entry->tx_tail
		    )
);
#else
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead, u16 swhead,
		     struct sdma_txreq *txp
		     ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
			     __field(u16, hwhead)
			     __field(u16, swhead)
			     __field(u16, txnext)
			     __field(u16, tx_tail)
			     __field(u16, tx_head)
			     __field(u8, idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
			   __entry->hwhead = hwhead;
			   __entry->swhead = swhead;
			   __entry->tx_tail = sde->tx_tail;
			   __entry->tx_head = sde->tx_head;
			   __entry->txnext = txp ? txp->next_descq_idx : ~0;
			   __entry->idx = sde->this_idx;
			   ),
	    TP_printk(
		    "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
		    __get_str(dev),
		    __entry->idx,
		    __entry->hwhead,
		    __entry->swhead,
		    __entry->txnext,
		    __entry->tx_head,
		    __entry->tx_tail
		    )
);
#endif
/*
 * Template for SDMA sequence-number (sn) events on a given engine.
 */
DECLARE_EVENT_CLASS(hfi1_sdma_sn,
		    TP_PROTO(struct sdma_engine *sde, u64 sn),
		    TP_ARGS(sde, sn),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
				     __field(u64, sn)
				     __field(u8, idx)
				     ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
				   __entry->sn = sn;
				   __entry->idx = sde->this_idx;
				   ),
		    TP_printk("[%s] SDE(%u) sn %llu",
			      __get_str(dev),
			      __entry->idx,
			      __entry->sn
			      )
);

/* Sequence number leaving the engine. */
DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
	     TP_PROTO(
		     struct sdma_engine *sde,
		     u64 sn
		     ),
	     TP_ARGS(sde, sn)
);

/* Sequence number entering the engine. */
DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
	     TP_PROTO(struct sdma_engine *sde, u64 sn),
	     TP_ARGS(sde, sn)
);
/* Format for dumping a full user SDMA packet header (PBC/LRH/BTH/KDETH). */
#define USDMA_HDR_FORMAT \
	"[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"

/*
 * hfi1_sdma_user_header - capture every word of the packet header built
 * for a user SDMA request 'req', byte-swapped to CPU order (PBC/KDETH
 * are little-endian on the wire, LRH/BTH big-endian).
 */
TRACE_EVENT(hfi1_sdma_user_header,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     struct hfi1_pkt_header *hdr, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, req)
		    __field(u32, pbc0)
		    __field(u32, pbc1)
		    __field(u32, lrh0)
		    __field(u32, lrh1)
		    __field(u32, bth0)
		    __field(u32, bth1)
		    __field(u32, bth2)
		    __field(u32, kdeth0)
		    __field(u32, kdeth1)
		    __field(u32, kdeth2)
		    __field(u32, kdeth3)
		    __field(u32, kdeth4)
		    __field(u32, kdeth5)
		    __field(u32, kdeth6)
		    __field(u32, kdeth7)
		    __field(u32, kdeth8)
		    __field(u32, tidval)
		    ),
	    TP_fast_assign(
		    __le32 *pbc = (__le32 *)hdr->pbc;
		    __be32 *lrh = (__be32 *)hdr->lrh;
		    __be32 *bth = (__be32 *)hdr->bth;
		    __le32 *kdeth = (__le32 *)&hdr->kdeth;
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->req = req;
		    __entry->pbc0 = le32_to_cpu(pbc[0]);
		    __entry->pbc1 = le32_to_cpu(pbc[1]);
		    __entry->lrh0 = be32_to_cpu(lrh[0]);
		    __entry->lrh1 = be32_to_cpu(lrh[1]);
		    __entry->bth0 = be32_to_cpu(bth[0]);
		    __entry->bth1 = be32_to_cpu(bth[1]);
		    __entry->bth2 = be32_to_cpu(bth[2]);
		    __entry->kdeth0 = le32_to_cpu(kdeth[0]);
		    __entry->kdeth1 = le32_to_cpu(kdeth[1]);
		    __entry->kdeth2 = le32_to_cpu(kdeth[2]);
		    __entry->kdeth3 = le32_to_cpu(kdeth[3]);
		    __entry->kdeth4 = le32_to_cpu(kdeth[4]);
		    __entry->kdeth5 = le32_to_cpu(kdeth[5]);
		    __entry->kdeth6 = le32_to_cpu(kdeth[6]);
		    __entry->kdeth7 = le32_to_cpu(kdeth[7]);
		    __entry->kdeth8 = le32_to_cpu(kdeth[8]);
		    __entry->tidval = tidval;
		    ),
	    /*
	     * NOTE(review): pbc1 is printed before pbc0 here — presumably
	     * intentional (PBC word order on output); confirm before
	     * changing.
	     */
	    TP_printk(USDMA_HDR_FORMAT,
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->req,
		      __entry->pbc1,
		      __entry->pbc0,
		      __entry->lrh0,
		      __entry->lrh1,
		      __entry->bth0,
		      __entry->bth1,
		      __entry->bth2,
		      __entry->kdeth0,
		      __entry->kdeth1,
		      __entry->kdeth2,
		      __entry->kdeth3,
		      __entry->kdeth4,
		      __entry->kdeth5,
		      __entry->kdeth6,
		      __entry->kdeth7,
		      __entry->kdeth8,
		      __entry->tidval
		      )
);
  543. #define SDMA_UREQ_FMT \
  544. "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
  545. TRACE_EVENT(hfi1_sdma_user_reqinfo,
  546. TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
  547. TP_ARGS(dd, ctxt, subctxt, i),
  548. TP_STRUCT__entry(
  549. DD_DEV_ENTRY(dd);
  550. __field(u16, ctxt)
  551. __field(u8, subctxt)
  552. __field(u8, ver_opcode)
  553. __field(u8, iovcnt)
  554. __field(u16, npkts)
  555. __field(u16, fragsize)
  556. __field(u16, comp_idx)
  557. ),
  558. TP_fast_assign(
  559. DD_DEV_ASSIGN(dd);
  560. __entry->ctxt = ctxt;
  561. __entry->subctxt = subctxt;
  562. __entry->ver_opcode = i[0] & 0xff;
  563. __entry->iovcnt = (i[0] >> 8) & 0xff;
  564. __entry->npkts = i[1];
  565. __entry->fragsize = i[2];
  566. __entry->comp_idx = i[3];
  567. ),
  568. TP_printk(SDMA_UREQ_FMT,
  569. __get_str(dev),
  570. __entry->ctxt,
  571. __entry->subctxt,
  572. __entry->ver_opcode,
  573. __entry->iovcnt,
  574. __entry->npkts,
  575. __entry->fragsize,
  576. __entry->comp_idx
  577. )
  578. );
/* Map a completion-state enum constant to its printable name. */
#define usdma_complete_name(st) { st, #st }
#define show_usdma_complete_state(st) \
	__print_symbolic(st, \
			 usdma_complete_name(FREE), \
			 usdma_complete_name(QUEUED), \
			 usdma_complete_name(COMPLETE), \
			 usdma_complete_name(ERROR))

/*
 * hfi1_sdma_user_completion - a user SDMA request reached a terminal or
 * intermediate completion state; 'code' is the associated status code.
 */
TRACE_EVENT(hfi1_sdma_user_completion,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
		     u8 state, int code),
	    TP_ARGS(dd, ctxt, subctxt, idx, state, code),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, idx)
		    __field(u8, state)
		    __field(int, code)
		    ),
	    TP_fast_assign(
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->idx = idx;
		    __entry->state = state;
		    __entry->code = code;
		    ),
	    TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
		      __get_str(dev), __entry->ctxt, __entry->subctxt,
		      __entry->idx, show_usdma_complete_state(__entry->state),
		      __entry->code)
);
/* Renders a u32 array as hex words into the trace_seq. */
const char *print_u32_array(struct trace_seq *, u32 *, int);

/* Helper for TP_printk(); 'p' is the implicit trace_seq in that scope. */
#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)

/*
 * hfi1_sdma_user_header_ahg - AHG (automatic header generation) update
 * words for a user SDMA request: engine, AHG slot, and the 'len'-word
 * update array.
 *
 * NOTE(review): memcpy copies len * sizeof(u32) into a 10-word array —
 * assumes callers pass len <= 10; confirm at the call sites.
 */
TRACE_EVENT(hfi1_sdma_user_header_ahg,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, req)
		    __field(u8, sde)
		    __field(u8, idx)
		    __field(int, len)
		    __field(u32, tidval)
		    __array(u32, ahg, 10)
		    ),
	    TP_fast_assign(
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->req = req;
		    __entry->sde = sde;
		    __entry->idx = ahgidx;
		    __entry->len = len;
		    __entry->tidval = tidval;
		    memcpy(__entry->ahg, ahg, len * sizeof(u32));
		    ),
	    TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->req,
		      __entry->sde,
		      __entry->idx,
		      __entry->len - 1,
		      __print_u32_hex(__entry->ahg, __entry->len),
		      __entry->tidval
		      )
);
/*
 * hfi1_sdma_state - SDMA engine state-machine transition; records the
 * current and new state names as strings.
 */
TRACE_EVENT(hfi1_sdma_state,
	    TP_PROTO(struct sdma_engine *sde,
		     const char *cstate,
		     const char *nstate
		     ),
	    TP_ARGS(sde, cstate, nstate),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
			     __string(curstate, cstate)
			     __string(newstate, nstate)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
			   __assign_str(curstate, cstate);
			   __assign_str(newstate, nstate);
			   ),
	    TP_printk("[%s] current state %s new state %s",
		      __get_str(dev),
		      __get_str(curstate),
		      __get_str(newstate)
		      )
);
/* Buffer-control table dump: shared limit plus per-VL dedicated/shared. */
#define BCT_FORMAT \
	"shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"

/* Fetch one big-endian field out of the snapshotted buffer_control. */
#define BCT(field) \
	be16_to_cpu( \
		((struct buffer_control *)__get_dynamic_array(bct))->field \
	)

/*
 * Template for buffer-control get/set events: snapshots the whole
 * buffer_control struct into a dynamic array and decodes it at print
 * time via BCT().
 */
DECLARE_EVENT_CLASS(hfi1_bct_template,
		    TP_PROTO(struct hfi1_devdata *dd,
			     struct buffer_control *bc),
		    TP_ARGS(dd, bc),
		    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
				     __dynamic_array(u8, bct, sizeof(*bc))
				     ),
		    TP_fast_assign(DD_DEV_ASSIGN(dd);
				   memcpy(__get_dynamic_array(bct), bc,
					  sizeof(*bc));
				   ),
		    TP_printk(BCT_FORMAT,
			      BCT(overall_shared_limit),
			      BCT(vl[0].dedicated),
			      BCT(vl[0].shared),
			      BCT(vl[1].dedicated),
			      BCT(vl[1].shared),
			      BCT(vl[2].dedicated),
			      BCT(vl[2].shared),
			      BCT(vl[3].dedicated),
			      BCT(vl[3].shared),
			      BCT(vl[4].dedicated),
			      BCT(vl[4].shared),
			      BCT(vl[5].dedicated),
			      BCT(vl[5].shared),
			      BCT(vl[6].dedicated),
			      BCT(vl[6].shared),
			      BCT(vl[7].dedicated),
			      BCT(vl[7].shared),
			      BCT(vl[15].dedicated),
			      BCT(vl[15].shared)
			      )
);

/* Buffer-control table written to hardware. */
DEFINE_EVENT(hfi1_bct_template, bct_set,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));

/* Buffer-control table read from hardware. */
DEFINE_EVENT(hfi1_bct_template, bct_get,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));
/*
 * hfi1_qp_send_completion - a send WQE completed on a QP; records the
 * WQE pointer and its wr_id, ssn, opcode, flags, and length, plus the
 * queue index 'idx'.
 */
TRACE_EVENT(
	hfi1_qp_send_completion,
	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
	TP_ARGS(qp, wqe, idx),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(struct rvt_swqe *, wqe)
		__field(u64, wr_id)
		__field(u32, qpn)
		__field(u32, qpt)
		__field(u32, length)
		__field(u32, idx)
		__field(u32, ssn)
		__field(enum ib_wr_opcode, opcode)
		__field(int, send_flags)
		),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->wqe = wqe;
		__entry->wr_id = wqe->wr.wr_id;
		__entry->qpn = qp->ibqp.qp_num;
		__entry->qpt = qp->ibqp.qp_type;
		__entry->length = wqe->length;
		__entry->idx = idx;
		__entry->ssn = wqe->ssn;
		__entry->opcode = wqe->wr.opcode;
		__entry->send_flags = wqe->wr.send_flags;
		),
	TP_printk(
		"[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
		__get_str(dev),
		__entry->qpn,
		__entry->qpt,
		__entry->wqe,
		__entry->idx,
		__entry->wr_id,
		__entry->length,
		__entry->ssn,
		__entry->opcode,
		__entry->send_flags
		)
);
/*
 * Template for do_send-path events carrying the QP number and a boolean
 * flag supplied by the caller.
 */
DECLARE_EVENT_CLASS(
	hfi1_do_send_template,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(bool, flag)
		),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->qpn = qp->ibqp.qp_num;
		__entry->flag = flag;
		),
	TP_printk(
		"[%s] qpn %x flag %d",
		__get_str(dev),
		__entry->qpn,
		__entry->flag
		)
);

/* RC do_send invoked. */
DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_do_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

/* RC send loop gave up its time slice. */
DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_expired_time_slice,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);
#endif /* __HFI1_TRACE_TX_H */

/*
 * Tell define_trace.h where this header lives so it can re-include it
 * with TRACE_HEADER_MULTI_READ set and generate the event code.
 */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_tx
#include <trace/define_trace.h>