/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

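/*
 * Once bcache is loaded, these tracepoints appear under the "bcache"
 * event system in tracefs (assuming the usual mount point), e.g.:
 *
 *	echo 1 > /sys/kernel/tracing/events/bcache/bcache_read/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */

/*
 * Request events record a request's position on the bcache device along
 * with the major:minor and sector of the original backing device.
 */
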
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(dev_t,		orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		/* data on the backing device starts 16 sectors in
		 * (BDEV_DATA_START_DEFAULT), after the superblock area */
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

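/*
 * Key events log a single bkey as inode:offset plus its length in
 * sectors and whether it points at dirty (not yet written back) data.
 */
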
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

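/*
 * Btree node events log only the cache bucket holding the node, taken
 * from the first pointer of the node's key.
 */
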
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,	bucket				)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

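/*
 * Generic bio events: device, start sector, size in 512-byte sectors,
 * and the rwbs flag string filled in by blk_fill_rwbs().
 */
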
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

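/*
 * Per-read event recording whether the read hit the cache and whether
 * caching was bypassed entirely.
 */
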
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->cache_hit	= hit;
		__entry->bypass		= bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

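/*
 * Per-write event; "writeback" means the write lands in the cache and
 * is flushed to the backing device later, "bypass" means it skips the
 * cache entirely.
 */
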
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		 bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->writeback	= writeback;
		__entry->bypass		= bypass;
	),

	TP_printk("%pU inode %llu %s %llu + %u writeback %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

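/*
 * Cache-set-wide events carry only the set UUID, printed with %pU.
 */
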
DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,	uuid,	16			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Btree */

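/*
 * Btree events cover node reads and writes, node allocation and
 * freeing, and the cannibalize path taken when no free node is
 * available and a cached node must be reclaimed instead.
 */
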
DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	block			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu block %u keys %u",
		  __entry->bucket, __entry->block, __entry->keys)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

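/*
 * Garbage collection: gc_start/gc_end bracket a pass over the btree,
 * gc_coalesce counts btree nodes merged along the way, and
 * gc_copy/gc_copy_collision track keys as data is moved out of
 * mostly-empty buckets (a collision means the key changed in the
 * meantime).
 */
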
TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level	= b->level;
		__entry->inode		= KEY_INODE(k);
		__entry->offset		= KEY_OFFSET(k);
		__entry->size		= KEY_SIZE(k);
		__entry->dirty		= KEY_DIRTY(k);
		__entry->op		= op;
		__entry->status		= status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

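/*
 * Node split/compact events log the bucket of the node being rewritten
 * and how many keys it held at the time.
 */
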
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

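/*
 * A keyscan walks a range of the keyspace; the event records how many
 * keys were found between the start and end positions (inode:offset).
 */
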
TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found			)
		__field(__u32,	start_inode			)
		__field(__u64,	start_offset			)
		__field(__u32,	end_inode			)
		__field(__u64,	end_offset			)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

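/*
 * Allocator events. A bucket index is converted to a device sector
 * offset by shifting by bucket_bits (log2 of the bucket size in
 * sectors).
 */
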
TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned,	sectors			)
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	blocked			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

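/*
 * Background writeback reuses the bkey class: one event per key as
 * dirty data is written back, plus a collision event when the key
 * changed while the writeback I/O was in flight.
 */
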
DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>