bcache.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>
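
/*
 * How these macros fit together: DECLARE_EVENT_CLASS() defines a reusable
 * event template (record layout, field assignment, print format);
 * DEFINE_EVENT() stamps out a named tracepoint from such a class; and
 * TRACE_EVENT() is shorthand for a class with exactly one event. Each named
 * event generates a trace_<name>() function that the driver calls at the
 * instrumented site, and the event appears under tracefs at
 * events/bcache/<name>/ once enabled. The TRACE_HEADER_MULTI_READ guard
 * lets <trace/define_trace.h> (included at the bottom of this file) re-read
 * this header to expand the macros.
 */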
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned int, orig_major)
		__field(unsigned int, orig_minor)
		__field(sector_t, sector)
		__field(sector_t, orig_sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio->bi_bdev->bd_dev;
		__entry->orig_major = d->disk->major;
		__entry->orig_minor = d->disk->first_minor;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->orig_sector = bio->bi_iter.bi_sector - 16;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
			      bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);
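
/*
 * The "- 16" above subtracts the default data offset of a bcache backing
 * device: the bcache superblock occupies the first sectors of the backing
 * device (BDEV_DATA_START_DEFAULT is 16 sectors), so orig_sector maps the
 * request back to the sector the upper layer originally asked for. A call
 * site for an event in this class looks like:
 *
 *	trace_bcache_request_start(d, bio);
 */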
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32, size)
		__field(u32, inode)
		__field(u64, offset)
		__field(bool, dirty)
	),

	TP_fast_assign(
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);
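
/*
 * bkey events print in bcache's usual "inode:offset len N dirty N" key
 * notation: KEY_INODE() identifies the backing volume, KEY_OFFSET() the
 * key's offset in sectors (bcache keys address the end of an extent),
 * KEY_SIZE() the extent length, and KEY_DIRTY() whether the cached data
 * still needs writeback.
 */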
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);
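
/*
 * PTR_BUCKET_NR() resolves pointer 0 of the node's key to the cache-device
 * bucket holding the node, so btree_node events identify a node by the
 * bucket it lives in.
 */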
/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio->bi_bdev->bd_dev;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
			      bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
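
/*
 * bcache_bio is the generic "describe one bio" class; it is reused below
 * for the cache-bypass, read-retry, and journal-write events, which all
 * only need the device, sector range, and rwbs flags of a single bio.
 */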
DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, cache_hit)
		__field(bool, bypass)
	),

	TP_fast_assign(
		__entry->dev = bio->bi_bdev->bd_dev;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
			      bio->bi_iter.bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);
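
/*
 * bcache_read fires once per read with two outcome flags: hit says whether
 * the data was served from the cache device, bypass whether the request
 * skipped the cache entirely (e.g. detected as sequential I/O or issued
 * while the cache was congested, per the bypass events above).
 */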
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		 bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
		__field(u64, inode)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, writeback)
		__field(bool, bypass)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
		__entry->inode = inode;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
			      bio->bi_iter.bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%pU inode %llu %s %llu + %u writeback %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);
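
/*
 * Unlike bcache_read, writes are logged per cache set rather than per block
 * device: %pU prints the 16-byte set UUID copied from the cache set's
 * superblock, and inode identifies the backing volume within it. writeback
 * reports whether the write was cached in writeback mode (to be flushed to
 * the backing device later) and bypass whether it went around the cache.
 */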
DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, block)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block = b->written;
		__entry->keys = b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu", __entry->bucket)
);
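
/*
 * Note that bcache_btree_write records the node's written-block count and
 * the key count of its last bset, but its TP_printk only emits the bucket;
 * the extra fields remain available to tools that read the raw event
 * buffer rather than the formatted text.
 */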
DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned, nodes)
	),

	TP_fast_assign(
		__entry->nodes = nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64, btree_node)
		__field(u32, btree_level)
		__field(u32, inode)
		__field(u64, offset)
		__field(u32, size)
		__field(u8, dirty)
		__field(u8, op)
		__field(u8, status)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);
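
/*
 * The op and status arguments arrive as unsigned but are stored as u8 to
 * keep the record compact; the output reads as
 * "<status> for <op> at <bucket>(<level>): <key>", locating the insertion
 * in the btree and echoing the key in the same notation as the bkey class.
 */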
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys = keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32, nr_found)
		__field(__u32, start_inode)
		__field(__u64, start_offset)
		__field(__u32, end_inode)
		__field(__u64, end_offset)
	),

	TP_fast_assign(
		__entry->nr_found = nr_found;
		__entry->start_inode = start_inode;
		__entry->start_offset = start_offset;
		__entry->end_inode = end_inode;
		__entry->end_offset = end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);
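
/*
 * bcache_keyscan reports the result of scanning a key range: how many keys
 * were found between the (inode, offset) pair where the scan started and
 * the one where it stopped.
 */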
/* Allocator */

TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned, sectors)
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->offset = bucket << ca->set->bucket_bits;
		__entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);
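
/*
 * bucket << ca->set->bucket_bits converts a bucket index into a sector
 * offset on the cache device (bucket_bits is log2 of the bucket size in
 * sectors), so the allocator events report positions in the same units as
 * the bio events above.
 */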
TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->offset = bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned, free)
		__field(unsigned, free_inc)
		__field(unsigned, blocked)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->free = fifo_used(&ca->free[reserve]);
		__entry->free_inc = fifo_used(&ca->free_inc);
		__entry->blocked = atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);
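
/*
 * On allocation failure the event snapshots the allocator's state: how many
 * buckets remain in the requested reserve's free fifo, how many sit in
 * free_inc (invalidated but not yet reusable), and how many priority writes
 * are blocking the allocator (prio_blocked).
 */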
/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
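
/*
 * <trace/define_trace.h> re-includes this header (hence the
 * TRACE_HEADER_MULTI_READ escape hatch in the include guard) and expands
 * the event definitions into real code. Exactly one translation unit in
 * the driver must define CREATE_TRACE_POINTS before including this header
 * so the tracepoints are instantiated once; in mainline that is
 * drivers/md/bcache/trace.c, which looks roughly like:
 *
 *	#include "bcache.h"
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/bcache.h>
 */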