/* drbd_debugfs.c — DRBD debugfs interface */
  1. #define pr_fmt(fmt) "drbd debugfs: " fmt
  2. #include <linux/kernel.h>
  3. #include <linux/module.h>
  4. #include <linux/debugfs.h>
  5. #include <linux/seq_file.h>
  6. #include <linux/stat.h>
  7. #include <linux/jiffies.h>
  8. #include <linux/list.h>
  9. #include "drbd_int.h"
  10. #include "drbd_req.h"
  11. #include "drbd_debugfs.h"
/**********************************************************************
* Whenever you change the file format, remember to bump the version. *
**********************************************************************/
/* Top-level debugfs dentries.  "minors" holds per-minor symlinks into
 * resources/<name>/volumes/<vnr> (see drbd_debugfs_device_add()).
 * root/version are presumably populated by the module init code, which
 * is outside this chunk — confirm against drbd_debugfs_init(). */
static struct dentry *drbd_debugfs_root;
static struct dentry *drbd_debugfs_version;
static struct dentry *drbd_debugfs_resources;
static struct dentry *drbd_debugfs_minors;
  19. static void seq_print_age_or_dash(struct seq_file *m, bool valid, unsigned long dt)
  20. {
  21. if (valid)
  22. seq_printf(m, "\t%d", jiffies_to_msecs(dt));
  23. else
  24. seq_printf(m, "\t-");
  25. }
  26. static void __seq_print_rq_state_bit(struct seq_file *m,
  27. bool is_set, char *sep, const char *set_name, const char *unset_name)
  28. {
  29. if (is_set && set_name) {
  30. seq_putc(m, *sep);
  31. seq_puts(m, set_name);
  32. *sep = '|';
  33. } else if (!is_set && unset_name) {
  34. seq_putc(m, *sep);
  35. seq_puts(m, unset_name);
  36. *sep = '|';
  37. }
  38. }
/* Convenience wrapper: print @set_name only when the bit is set,
 * print nothing when it is clear. */
static void seq_print_rq_state_bit(struct seq_file *m,
bool is_set, char *sep, const char *set_name)
{
__seq_print_rq_state_bit(m, is_set, sep, set_name, NULL);
}
  44. /* pretty print enum drbd_req_state_bits req->rq_state */
  45. static void seq_print_request_state(struct seq_file *m, struct drbd_request *req)
  46. {
  47. unsigned int s = req->rq_state;
  48. char sep = ' ';
  49. seq_printf(m, "\t0x%08x", s);
  50. seq_printf(m, "\tmaster: %s", req->master_bio ? "pending" : "completed");
  51. /* RQ_WRITE ignored, already reported */
  52. seq_puts(m, "\tlocal:");
  53. seq_print_rq_state_bit(m, s & RQ_IN_ACT_LOG, &sep, "in-AL");
  54. seq_print_rq_state_bit(m, s & RQ_POSTPONED, &sep, "postponed");
  55. seq_print_rq_state_bit(m, s & RQ_COMPLETION_SUSP, &sep, "suspended");
  56. sep = ' ';
  57. seq_print_rq_state_bit(m, s & RQ_LOCAL_PENDING, &sep, "pending");
  58. seq_print_rq_state_bit(m, s & RQ_LOCAL_COMPLETED, &sep, "completed");
  59. seq_print_rq_state_bit(m, s & RQ_LOCAL_ABORTED, &sep, "aborted");
  60. seq_print_rq_state_bit(m, s & RQ_LOCAL_OK, &sep, "ok");
  61. if (sep == ' ')
  62. seq_puts(m, " -");
  63. /* for_each_connection ... */
  64. seq_printf(m, "\tnet:");
  65. sep = ' ';
  66. seq_print_rq_state_bit(m, s & RQ_NET_PENDING, &sep, "pending");
  67. seq_print_rq_state_bit(m, s & RQ_NET_QUEUED, &sep, "queued");
  68. seq_print_rq_state_bit(m, s & RQ_NET_SENT, &sep, "sent");
  69. seq_print_rq_state_bit(m, s & RQ_NET_DONE, &sep, "done");
  70. seq_print_rq_state_bit(m, s & RQ_NET_SIS, &sep, "sis");
  71. seq_print_rq_state_bit(m, s & RQ_NET_OK, &sep, "ok");
  72. if (sep == ' ')
  73. seq_puts(m, " -");
  74. seq_printf(m, " :");
  75. sep = ' ';
  76. seq_print_rq_state_bit(m, s & RQ_EXP_RECEIVE_ACK, &sep, "B");
  77. seq_print_rq_state_bit(m, s & RQ_EXP_WRITE_ACK, &sep, "C");
  78. seq_print_rq_state_bit(m, s & RQ_EXP_BARR_ACK, &sep, "barr");
  79. if (sep == ' ')
  80. seq_puts(m, " -");
  81. seq_printf(m, "\n");
  82. }
  83. static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now)
  84. {
  85. /* change anything here, fixup header below! */
  86. unsigned int s = req->rq_state;
  87. #define RQ_HDR_1 "epoch\tsector\tsize\trw"
  88. seq_printf(m, "0x%x\t%llu\t%u\t%s",
  89. req->epoch,
  90. (unsigned long long)req->i.sector, req->i.size >> 9,
  91. (s & RQ_WRITE) ? "W" : "R");
  92. #define RQ_HDR_2 "\tstart\tin AL\tsubmit"
  93. seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));
  94. seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);
  95. seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);
  96. #define RQ_HDR_3 "\tsent\tacked\tdone"
  97. seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);
  98. seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);
  99. seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);
  100. #define RQ_HDR_4 "\tstate\n"
  101. seq_print_request_state(m, req);
  102. }
  103. #define RQ_HDR RQ_HDR_1 RQ_HDR_2 RQ_HDR_3 RQ_HDR_4
/* Like seq_print_one_request(), but prefixed with "minor\tvnr\t". */
static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now)
{
seq_printf(m, "%u\t%u\t", req->device->minor, req->device->vnr);
seq_print_one_request(m, req, now);
}
  109. static void seq_print_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
  110. {
  111. struct drbd_device *device;
  112. unsigned int i;
  113. seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n");
  114. rcu_read_lock();
  115. idr_for_each_entry(&resource->devices, device, i) {
  116. struct drbd_md_io tmp;
  117. /* In theory this is racy,
  118. * in the sense that there could have been a
  119. * drbd_md_put_buffer(); drbd_md_get_buffer();
  120. * between accessing these members here. */
  121. tmp = device->md_io;
  122. if (atomic_read(&tmp.in_use)) {
  123. seq_printf(m, "%u\t%u\t%d\t",
  124. device->minor, device->vnr,
  125. jiffies_to_msecs(now - tmp.start_jif));
  126. if (time_before(tmp.submit_jif, tmp.start_jif))
  127. seq_puts(m, "-\t");
  128. else
  129. seq_printf(m, "%d\t", jiffies_to_msecs(now - tmp.submit_jif));
  130. seq_printf(m, "%s\n", tmp.current_use);
  131. }
  132. }
  133. rcu_read_unlock();
  134. }
  135. static void seq_print_waiting_for_AL(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
  136. {
  137. struct drbd_device *device;
  138. unsigned int i;
  139. seq_puts(m, "minor\tvnr\tage\t#waiting\n");
  140. rcu_read_lock();
  141. idr_for_each_entry(&resource->devices, device, i) {
  142. unsigned long jif;
  143. struct drbd_request *req;
  144. int n = atomic_read(&device->ap_actlog_cnt);
  145. if (n) {
  146. spin_lock_irq(&device->resource->req_lock);
  147. req = list_first_entry_or_null(&device->pending_master_completion[1],
  148. struct drbd_request, req_pending_master_completion);
  149. /* if the oldest request does not wait for the activity log
  150. * it is not interesting for us here */
  151. if (req && !(req->rq_state & RQ_IN_ACT_LOG))
  152. jif = req->start_jif;
  153. else
  154. req = NULL;
  155. spin_unlock_irq(&device->resource->req_lock);
  156. }
  157. if (n) {
  158. seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
  159. if (req)
  160. seq_printf(m, "%u\t", jiffies_to_msecs(now - jif));
  161. else
  162. seq_puts(m, "-\t");
  163. seq_printf(m, "%u\n", n);
  164. }
  165. }
  166. rcu_read_unlock();
  167. }
  168. static void seq_print_device_bitmap_io(struct seq_file *m, struct drbd_device *device, unsigned long now)
  169. {
  170. struct drbd_bm_aio_ctx *ctx;
  171. unsigned long start_jif;
  172. unsigned int in_flight;
  173. unsigned int flags;
  174. spin_lock_irq(&device->resource->req_lock);
  175. ctx = list_first_entry_or_null(&device->pending_bitmap_io, struct drbd_bm_aio_ctx, list);
  176. if (ctx && ctx->done)
  177. ctx = NULL;
  178. if (ctx) {
  179. start_jif = ctx->start_jif;
  180. in_flight = atomic_read(&ctx->in_flight);
  181. flags = ctx->flags;
  182. }
  183. spin_unlock_irq(&device->resource->req_lock);
  184. if (ctx) {
  185. seq_printf(m, "%u\t%u\t%c\t%u\t%u\n",
  186. device->minor, device->vnr,
  187. (flags & BM_AIO_READ) ? 'R' : 'W',
  188. jiffies_to_msecs(now - start_jif),
  189. in_flight);
  190. }
  191. }
  192. static void seq_print_resource_pending_bitmap_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
  193. {
  194. struct drbd_device *device;
  195. unsigned int i;
  196. seq_puts(m, "minor\tvnr\trw\tage\t#in-flight\n");
  197. rcu_read_lock();
  198. idr_for_each_entry(&resource->devices, device, i) {
  199. seq_print_device_bitmap_io(m, device, now);
  200. }
  201. rcu_read_unlock();
  202. }
  203. /* pretty print enum peer_req->flags */
  204. static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_request *peer_req)
  205. {
  206. unsigned long f = peer_req->flags;
  207. char sep = ' ';
  208. __seq_print_rq_state_bit(m, f & EE_SUBMITTED, &sep, "submitted", "preparing");
  209. __seq_print_rq_state_bit(m, f & EE_APPLICATION, &sep, "application", "internal");
  210. seq_print_rq_state_bit(m, f & EE_CALL_AL_COMPLETE_IO, &sep, "in-AL");
  211. seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C");
  212. seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync");
  213. if (f & EE_IS_TRIM)
  214. __seq_print_rq_state_bit(m, f & EE_IS_TRIM_USE_ZEROOUT, &sep, "zero-out", "trim");
  215. seq_print_rq_state_bit(m, f & EE_WRITE_SAME, &sep, "write-same");
  216. seq_putc(m, '\n');
  217. }
  218. static void seq_print_peer_request(struct seq_file *m,
  219. struct drbd_device *device, struct list_head *lh,
  220. unsigned long now)
  221. {
  222. bool reported_preparing = false;
  223. struct drbd_peer_request *peer_req;
  224. list_for_each_entry(peer_req, lh, w.list) {
  225. if (reported_preparing && !(peer_req->flags & EE_SUBMITTED))
  226. continue;
  227. if (device)
  228. seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
  229. seq_printf(m, "%llu\t%u\t%c\t%u\t",
  230. (unsigned long long)peer_req->i.sector, peer_req->i.size >> 9,
  231. (peer_req->flags & EE_WRITE) ? 'W' : 'R',
  232. jiffies_to_msecs(now - peer_req->submit_jif));
  233. seq_print_peer_request_flags(m, peer_req);
  234. if (peer_req->flags & EE_SUBMITTED)
  235. break;
  236. else
  237. reported_preparing = true;
  238. }
  239. }
  240. static void seq_print_device_peer_requests(struct seq_file *m,
  241. struct drbd_device *device, unsigned long now)
  242. {
  243. seq_puts(m, "minor\tvnr\tsector\tsize\trw\tage\tflags\n");
  244. spin_lock_irq(&device->resource->req_lock);
  245. seq_print_peer_request(m, device, &device->active_ee, now);
  246. seq_print_peer_request(m, device, &device->read_ee, now);
  247. seq_print_peer_request(m, device, &device->sync_ee, now);
  248. spin_unlock_irq(&device->resource->req_lock);
  249. if (test_bit(FLUSH_PENDING, &device->flags)) {
  250. seq_printf(m, "%u\t%u\t-\t-\tF\t%u\tflush\n",
  251. device->minor, device->vnr,
  252. jiffies_to_msecs(now - device->flush_jif));
  253. }
  254. }
  255. static void seq_print_resource_pending_peer_requests(struct seq_file *m,
  256. struct drbd_resource *resource, unsigned long now)
  257. {
  258. struct drbd_device *device;
  259. unsigned int i;
  260. rcu_read_lock();
  261. idr_for_each_entry(&resource->devices, device, i) {
  262. seq_print_device_peer_requests(m, device, now);
  263. }
  264. rcu_read_unlock();
  265. }
/*
 * Summarize the transfer log: walk all requests oldest-first and print a
 * request only when it shows a "timing aspect" combination not shown by an
 * even older request already.  This is meant to tell local-disk problems
 * from network problems without dumping the whole (possibly huge) log.
 *
 * Walks under resource->req_lock, but periodically drops the lock (every
 * 512 entries) to not disable interrupts "forever"; an extra kref on the
 * current request keeps the list position valid across the unlocked window.
 */
static void seq_print_resource_transfer_log_summary(struct seq_file *m,
struct drbd_resource *resource,
struct drbd_connection *connection,
unsigned long now)
{
struct drbd_request *req;
unsigned int count = 0;
unsigned int show_state = 0;
seq_puts(m, "n\tdevice\tvnr\t" RQ_HDR);
spin_lock_irq(&resource->req_lock);
list_for_each_entry(req, &connection->transfer_log, tl_requests) {
unsigned int tmp = 0;
unsigned int s;
++count;
/* don't disable irq "forever" */
if (!(count & 0x1ff)) {
struct drbd_request *req_next;
/* hold a reference so req cannot vanish while unlocked */
kref_get(&req->kref);
spin_unlock_irq(&resource->req_lock);
cond_resched();
spin_lock_irq(&resource->req_lock);
req_next = list_next_entry(req, tl_requests);
/* if our kref was the last one, req was unlinked: advance */
if (kref_put(&req->kref, drbd_req_destroy))
req = req_next;
if (&req->tl_requests == &connection->transfer_log)
break;
}
s = req->rq_state;
/* This is meant to summarize timing issues, to be able to tell
* local disk problems from network problems.
* Skip requests, if we have shown an even older request with
* similar aspects already. */
if (req->master_bio == NULL)
tmp |= 1;
if ((s & RQ_LOCAL_MASK) && (s & RQ_LOCAL_PENDING))
tmp |= 2;
if (s & RQ_NET_MASK) {
if (!(s & RQ_NET_SENT))
tmp |= 4;
if (s & RQ_NET_PENDING)
tmp |= 8;
if (!(s & RQ_NET_DONE))
tmp |= 16;
}
/* nothing new compared to what was already shown? skip */
if ((tmp & show_state) == tmp)
continue;
show_state |= tmp;
seq_printf(m, "%u\t", count);
seq_print_minor_vnr_req(m, req, now);
/* all five aspects seen: nothing more to learn */
if (show_state == 0x1f)
break;
}
spin_unlock_irq(&resource->req_lock);
}
  320. /* TODO: transfer_log and friends should be moved to resource */
  321. static int in_flight_summary_show(struct seq_file *m, void *pos)
  322. {
  323. struct drbd_resource *resource = m->private;
  324. struct drbd_connection *connection;
  325. unsigned long jif = jiffies;
  326. connection = first_connection(resource);
  327. /* This does not happen, actually.
  328. * But be robust and prepare for future code changes. */
  329. if (!connection || !kref_get_unless_zero(&connection->kref))
  330. return -ESTALE;
  331. /* BUMP me if you change the file format/content/presentation */
  332. seq_printf(m, "v: %u\n\n", 0);
  333. seq_puts(m, "oldest bitmap IO\n");
  334. seq_print_resource_pending_bitmap_io(m, resource, jif);
  335. seq_putc(m, '\n');
  336. seq_puts(m, "meta data IO\n");
  337. seq_print_resource_pending_meta_io(m, resource, jif);
  338. seq_putc(m, '\n');
  339. seq_puts(m, "socket buffer stats\n");
  340. /* for each connection ... once we have more than one */
  341. rcu_read_lock();
  342. if (connection->data.socket) {
  343. /* open coded SIOCINQ, the "relevant" part */
  344. struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);
  345. int answ = tp->rcv_nxt - tp->copied_seq;
  346. seq_printf(m, "unread receive buffer: %u Byte\n", answ);
  347. /* open coded SIOCOUTQ, the "relevant" part */
  348. answ = tp->write_seq - tp->snd_una;
  349. seq_printf(m, "unacked send buffer: %u Byte\n", answ);
  350. }
  351. rcu_read_unlock();
  352. seq_putc(m, '\n');
  353. seq_puts(m, "oldest peer requests\n");
  354. seq_print_resource_pending_peer_requests(m, resource, jif);
  355. seq_putc(m, '\n');
  356. seq_puts(m, "application requests waiting for activity log\n");
  357. seq_print_waiting_for_AL(m, resource, jif);
  358. seq_putc(m, '\n');
  359. seq_puts(m, "oldest application requests\n");
  360. seq_print_resource_transfer_log_summary(m, resource, connection, jif);
  361. seq_putc(m, '\n');
  362. jif = jiffies - jif;
  363. if (jif)
  364. seq_printf(m, "generated in %d ms\n", jiffies_to_msecs(jif));
  365. kref_put(&connection->kref, drbd_destroy_connection);
  366. return 0;
  367. }
/* make sure at *open* time that the respective object won't go away. */
/*
 * Common open helper: take a reference on @kref iff the debugfs dentry
 * is still linked (i.e. debugfs_remove() has not run yet), then set up
 * single_open().  Returns -ESTALE when the object is already on its way
 * out; on single_open() failure the just-taken reference is dropped.
 */
static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *),
void *data, struct kref *kref,
void (*release)(struct kref *))
{
struct dentry *parent;
int ret = -ESTALE;
/* Are we still linked,
* or has debugfs_remove() already been called? */
parent = file->f_path.dentry->d_parent;
/* serialize with d_delete() */
inode_lock(d_inode(parent));
/* Make sure the object is still alive */
if (simple_positive(file->f_path.dentry)
&& kref_get_unless_zero(kref))
ret = 0;
inode_unlock(d_inode(parent));
if (!ret) {
ret = single_open(file, show, data);
if (ret)
kref_put(kref, release);
}
return ret;
}
  392. static int in_flight_summary_open(struct inode *inode, struct file *file)
  393. {
  394. struct drbd_resource *resource = inode->i_private;
  395. return drbd_single_open(file, in_flight_summary_show, resource,
  396. &resource->kref, drbd_destroy_resource);
  397. }
  398. static int in_flight_summary_release(struct inode *inode, struct file *file)
  399. {
  400. struct drbd_resource *resource = inode->i_private;
  401. kref_put(&resource->kref, drbd_destroy_resource);
  402. return single_release(inode, file);
  403. }
  404. static const struct file_operations in_flight_summary_fops = {
  405. .owner = THIS_MODULE,
  406. .open = in_flight_summary_open,
  407. .read = seq_read,
  408. .llseek = seq_lseek,
  409. .release = in_flight_summary_release,
  410. };
  411. void drbd_debugfs_resource_add(struct drbd_resource *resource)
  412. {
  413. struct dentry *dentry;
  414. if (!drbd_debugfs_resources)
  415. return;
  416. dentry = debugfs_create_dir(resource->name, drbd_debugfs_resources);
  417. if (IS_ERR_OR_NULL(dentry))
  418. goto fail;
  419. resource->debugfs_res = dentry;
  420. dentry = debugfs_create_dir("volumes", resource->debugfs_res);
  421. if (IS_ERR_OR_NULL(dentry))
  422. goto fail;
  423. resource->debugfs_res_volumes = dentry;
  424. dentry = debugfs_create_dir("connections", resource->debugfs_res);
  425. if (IS_ERR_OR_NULL(dentry))
  426. goto fail;
  427. resource->debugfs_res_connections = dentry;
  428. dentry = debugfs_create_file("in_flight_summary", S_IRUSR|S_IRGRP,
  429. resource->debugfs_res, resource,
  430. &in_flight_summary_fops);
  431. if (IS_ERR_OR_NULL(dentry))
  432. goto fail;
  433. resource->debugfs_res_in_flight_summary = dentry;
  434. return;
  435. fail:
  436. drbd_debugfs_resource_cleanup(resource);
  437. drbd_err(resource, "failed to create debugfs dentry\n");
  438. }
/* Remove one dentry and NULL the slot, so cleanup is idempotent
 * (debugfs_remove() itself tolerates NULL). */
static void drbd_debugfs_remove(struct dentry **dp)
{
debugfs_remove(*dp);
*dp = NULL;
}
/* Tear down all per-resource debugfs entries, leaves before directories. */
void drbd_debugfs_resource_cleanup(struct drbd_resource *resource)
{
/* it is ok to call debugfs_remove(NULL) */
drbd_debugfs_remove(&resource->debugfs_res_in_flight_summary);
drbd_debugfs_remove(&resource->debugfs_res_connections);
drbd_debugfs_remove(&resource->debugfs_res_volumes);
drbd_debugfs_remove(&resource->debugfs_res);
}
  452. static void seq_print_one_timing_detail(struct seq_file *m,
  453. const struct drbd_thread_timing_details *tdp,
  454. unsigned long now)
  455. {
  456. struct drbd_thread_timing_details td;
  457. /* No locking...
  458. * use temporary assignment to get at consistent data. */
  459. do {
  460. td = *tdp;
  461. } while (td.cb_nr != tdp->cb_nr);
  462. if (!td.cb_addr)
  463. return;
  464. seq_printf(m, "%u\t%d\t%s:%u\t%ps\n",
  465. td.cb_nr,
  466. jiffies_to_msecs(now - td.start_jif),
  467. td.caller_fn, td.line,
  468. td.cb_addr);
  469. }
  470. static void seq_print_timing_details(struct seq_file *m,
  471. const char *title,
  472. unsigned int cb_nr, struct drbd_thread_timing_details *tdp, unsigned long now)
  473. {
  474. unsigned int start_idx;
  475. unsigned int i;
  476. seq_printf(m, "%s\n", title);
  477. /* If not much is going on, this will result in natural ordering.
  478. * If it is very busy, we will possibly skip events, or even see wrap
  479. * arounds, which could only be avoided with locking.
  480. */
  481. start_idx = cb_nr % DRBD_THREAD_DETAILS_HIST;
  482. for (i = start_idx; i < DRBD_THREAD_DETAILS_HIST; i++)
  483. seq_print_one_timing_detail(m, tdp+i, now);
  484. for (i = 0; i < start_idx; i++)
  485. seq_print_one_timing_detail(m, tdp+i, now);
  486. }
  487. static int callback_history_show(struct seq_file *m, void *ignored)
  488. {
  489. struct drbd_connection *connection = m->private;
  490. unsigned long jif = jiffies;
  491. /* BUMP me if you change the file format/content/presentation */
  492. seq_printf(m, "v: %u\n\n", 0);
  493. seq_puts(m, "n\tage\tcallsite\tfn\n");
  494. seq_print_timing_details(m, "worker", connection->w_cb_nr, connection->w_timing_details, jif);
  495. seq_print_timing_details(m, "receiver", connection->r_cb_nr, connection->r_timing_details, jif);
  496. return 0;
  497. }
  498. static int callback_history_open(struct inode *inode, struct file *file)
  499. {
  500. struct drbd_connection *connection = inode->i_private;
  501. return drbd_single_open(file, callback_history_show, connection,
  502. &connection->kref, drbd_destroy_connection);
  503. }
  504. static int callback_history_release(struct inode *inode, struct file *file)
  505. {
  506. struct drbd_connection *connection = inode->i_private;
  507. kref_put(&connection->kref, drbd_destroy_connection);
  508. return single_release(inode, file);
  509. }
  510. static const struct file_operations connection_callback_history_fops = {
  511. .owner = THIS_MODULE,
  512. .open = callback_history_open,
  513. .read = seq_read,
  514. .llseek = seq_lseek,
  515. .release = callback_history_release,
  516. };
  517. static int connection_oldest_requests_show(struct seq_file *m, void *ignored)
  518. {
  519. struct drbd_connection *connection = m->private;
  520. unsigned long now = jiffies;
  521. struct drbd_request *r1, *r2;
  522. /* BUMP me if you change the file format/content/presentation */
  523. seq_printf(m, "v: %u\n\n", 0);
  524. spin_lock_irq(&connection->resource->req_lock);
  525. r1 = connection->req_next;
  526. if (r1)
  527. seq_print_minor_vnr_req(m, r1, now);
  528. r2 = connection->req_ack_pending;
  529. if (r2 && r2 != r1) {
  530. r1 = r2;
  531. seq_print_minor_vnr_req(m, r1, now);
  532. }
  533. r2 = connection->req_not_net_done;
  534. if (r2 && r2 != r1)
  535. seq_print_minor_vnr_req(m, r2, now);
  536. spin_unlock_irq(&connection->resource->req_lock);
  537. return 0;
  538. }
  539. static int connection_oldest_requests_open(struct inode *inode, struct file *file)
  540. {
  541. struct drbd_connection *connection = inode->i_private;
  542. return drbd_single_open(file, connection_oldest_requests_show, connection,
  543. &connection->kref, drbd_destroy_connection);
  544. }
  545. static int connection_oldest_requests_release(struct inode *inode, struct file *file)
  546. {
  547. struct drbd_connection *connection = inode->i_private;
  548. kref_put(&connection->kref, drbd_destroy_connection);
  549. return single_release(inode, file);
  550. }
  551. static const struct file_operations connection_oldest_requests_fops = {
  552. .owner = THIS_MODULE,
  553. .open = connection_oldest_requests_open,
  554. .read = seq_read,
  555. .llseek = seq_lseek,
  556. .release = connection_oldest_requests_release,
  557. };
  558. void drbd_debugfs_connection_add(struct drbd_connection *connection)
  559. {
  560. struct dentry *conns_dir = connection->resource->debugfs_res_connections;
  561. struct dentry *dentry;
  562. if (!conns_dir)
  563. return;
  564. /* Once we enable mutliple peers,
  565. * these connections will have descriptive names.
  566. * For now, it is just the one connection to the (only) "peer". */
  567. dentry = debugfs_create_dir("peer", conns_dir);
  568. if (IS_ERR_OR_NULL(dentry))
  569. goto fail;
  570. connection->debugfs_conn = dentry;
  571. dentry = debugfs_create_file("callback_history", S_IRUSR|S_IRGRP,
  572. connection->debugfs_conn, connection,
  573. &connection_callback_history_fops);
  574. if (IS_ERR_OR_NULL(dentry))
  575. goto fail;
  576. connection->debugfs_conn_callback_history = dentry;
  577. dentry = debugfs_create_file("oldest_requests", S_IRUSR|S_IRGRP,
  578. connection->debugfs_conn, connection,
  579. &connection_oldest_requests_fops);
  580. if (IS_ERR_OR_NULL(dentry))
  581. goto fail;
  582. connection->debugfs_conn_oldest_requests = dentry;
  583. return;
  584. fail:
  585. drbd_debugfs_connection_cleanup(connection);
  586. drbd_err(connection, "failed to create debugfs dentry\n");
  587. }
/* Tear down per-connection debugfs entries, files before the directory. */
void drbd_debugfs_connection_cleanup(struct drbd_connection *connection)
{
drbd_debugfs_remove(&connection->debugfs_conn_callback_history);
drbd_debugfs_remove(&connection->debugfs_conn_oldest_requests);
drbd_debugfs_remove(&connection->debugfs_conn);
}
  594. static void resync_dump_detail(struct seq_file *m, struct lc_element *e)
  595. {
  596. struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);
  597. seq_printf(m, "%5d %s %s %s", bme->rs_left,
  598. test_bit(BME_NO_WRITES, &bme->flags) ? "NO_WRITES" : "---------",
  599. test_bit(BME_LOCKED, &bme->flags) ? "LOCKED" : "------",
  600. test_bit(BME_PRIORITY, &bme->flags) ? "PRIORITY" : "--------"
  601. );
  602. }
  603. static int device_resync_extents_show(struct seq_file *m, void *ignored)
  604. {
  605. struct drbd_device *device = m->private;
  606. /* BUMP me if you change the file format/content/presentation */
  607. seq_printf(m, "v: %u\n\n", 0);
  608. if (get_ldev_if_state(device, D_FAILED)) {
  609. lc_seq_printf_stats(m, device->resync);
  610. lc_seq_dump_details(m, device->resync, "rs_left flags", resync_dump_detail);
  611. put_ldev(device);
  612. }
  613. return 0;
  614. }
  615. static int device_act_log_extents_show(struct seq_file *m, void *ignored)
  616. {
  617. struct drbd_device *device = m->private;
  618. /* BUMP me if you change the file format/content/presentation */
  619. seq_printf(m, "v: %u\n\n", 0);
  620. if (get_ldev_if_state(device, D_FAILED)) {
  621. lc_seq_printf_stats(m, device->act_log);
  622. lc_seq_dump_details(m, device->act_log, "", NULL);
  623. put_ldev(device);
  624. }
  625. return 0;
  626. }
  627. static int device_oldest_requests_show(struct seq_file *m, void *ignored)
  628. {
  629. struct drbd_device *device = m->private;
  630. struct drbd_resource *resource = device->resource;
  631. unsigned long now = jiffies;
  632. struct drbd_request *r1, *r2;
  633. int i;
  634. /* BUMP me if you change the file format/content/presentation */
  635. seq_printf(m, "v: %u\n\n", 0);
  636. seq_puts(m, RQ_HDR);
  637. spin_lock_irq(&resource->req_lock);
  638. /* WRITE, then READ */
  639. for (i = 1; i >= 0; --i) {
  640. r1 = list_first_entry_or_null(&device->pending_master_completion[i],
  641. struct drbd_request, req_pending_master_completion);
  642. r2 = list_first_entry_or_null(&device->pending_completion[i],
  643. struct drbd_request, req_pending_local);
  644. if (r1)
  645. seq_print_one_request(m, r1, now);
  646. if (r2 && r2 != r1)
  647. seq_print_one_request(m, r2, now);
  648. }
  649. spin_unlock_irq(&resource->req_lock);
  650. return 0;
  651. }
  652. static int device_data_gen_id_show(struct seq_file *m, void *ignored)
  653. {
  654. struct drbd_device *device = m->private;
  655. struct drbd_md *md;
  656. enum drbd_uuid_index idx;
  657. if (!get_ldev_if_state(device, D_FAILED))
  658. return -ENODEV;
  659. md = &device->ldev->md;
  660. spin_lock_irq(&md->uuid_lock);
  661. for (idx = UI_CURRENT; idx <= UI_HISTORY_END; idx++) {
  662. seq_printf(m, "0x%016llX\n", md->uuid[idx]);
  663. }
  664. spin_unlock_irq(&md->uuid_lock);
  665. put_ldev(device);
  666. return 0;
  667. }
  668. static int device_ed_gen_id_show(struct seq_file *m, void *ignored)
  669. {
  670. struct drbd_device *device = m->private;
  671. seq_printf(m, "0x%016llX\n", (unsigned long long)device->ed_uuid);
  672. return 0;
  673. }
/* Generate open/release functions and a file_operations for one
 * per-device debugfs attribute.  open pins the device via
 * drbd_single_open(); release drops that reference again. */
#define drbd_debugfs_device_attr(name) \
static int device_ ## name ## _open(struct inode *inode, struct file *file) \
{ \
struct drbd_device *device = inode->i_private; \
return drbd_single_open(file, device_ ## name ## _show, device, \
&device->kref, drbd_destroy_device); \
} \
static int device_ ## name ## _release(struct inode *inode, struct file *file) \
{ \
struct drbd_device *device = inode->i_private; \
kref_put(&device->kref, drbd_destroy_device); \
return single_release(inode, file); \
} \
static const struct file_operations device_ ## name ## _fops = { \
.owner = THIS_MODULE, \
.open = device_ ## name ## _open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = device_ ## name ## _release, \
};
/* instantiate the boilerplate for each per-device attribute */
drbd_debugfs_device_attr(oldest_requests)
drbd_debugfs_device_attr(act_log_extents)
drbd_debugfs_device_attr(resync_extents)
drbd_debugfs_device_attr(data_gen_id)
drbd_debugfs_device_attr(ed_gen_id)
  699. void drbd_debugfs_device_add(struct drbd_device *device)
  700. {
  701. struct dentry *vols_dir = device->resource->debugfs_res_volumes;
  702. char minor_buf[8]; /* MINORMASK, MINORBITS == 20; */
  703. char vnr_buf[8]; /* volume number vnr is even 16 bit only; */
  704. char *slink_name = NULL;
  705. struct dentry *dentry;
  706. if (!vols_dir || !drbd_debugfs_minors)
  707. return;
  708. snprintf(vnr_buf, sizeof(vnr_buf), "%u", device->vnr);
  709. dentry = debugfs_create_dir(vnr_buf, vols_dir);
  710. if (IS_ERR_OR_NULL(dentry))
  711. goto fail;
  712. device->debugfs_vol = dentry;
  713. snprintf(minor_buf, sizeof(minor_buf), "%u", device->minor);
  714. slink_name = kasprintf(GFP_KERNEL, "../resources/%s/volumes/%u",
  715. device->resource->name, device->vnr);
  716. if (!slink_name)
  717. goto fail;
  718. dentry = debugfs_create_symlink(minor_buf, drbd_debugfs_minors, slink_name);
  719. kfree(slink_name);
  720. slink_name = NULL;
  721. if (IS_ERR_OR_NULL(dentry))
  722. goto fail;
  723. device->debugfs_minor = dentry;
  724. #define DCF(name) do { \
  725. dentry = debugfs_create_file(#name, S_IRUSR|S_IRGRP, \
  726. device->debugfs_vol, device, \
  727. &device_ ## name ## _fops); \
  728. if (IS_ERR_OR_NULL(dentry)) \
  729. goto fail; \
  730. device->debugfs_vol_ ## name = dentry; \
  731. } while (0)
  732. DCF(oldest_requests);
  733. DCF(act_log_extents);
  734. DCF(resync_extents);
  735. DCF(data_gen_id);
  736. DCF(ed_gen_id);
  737. #undef DCF
  738. return;
  739. fail:
  740. drbd_debugfs_device_cleanup(device);
  741. drbd_err(device, "failed to create debugfs entries\n");
  742. }
  743. void drbd_debugfs_device_cleanup(struct drbd_device *device)
  744. {
  745. drbd_debugfs_remove(&device->debugfs_minor);
  746. drbd_debugfs_remove(&device->debugfs_vol_oldest_requests);
  747. drbd_debugfs_remove(&device->debugfs_vol_act_log_extents);
  748. drbd_debugfs_remove(&device->debugfs_vol_resync_extents);
  749. drbd_debugfs_remove(&device->debugfs_vol_data_gen_id);
  750. drbd_debugfs_remove(&device->debugfs_vol_ed_gen_id);
  751. drbd_debugfs_remove(&device->debugfs_vol);
  752. }
  753. void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device)
  754. {
  755. struct dentry *conn_dir = peer_device->connection->debugfs_conn;
  756. struct dentry *dentry;
  757. char vnr_buf[8];
  758. if (!conn_dir)
  759. return;
  760. snprintf(vnr_buf, sizeof(vnr_buf), "%u", peer_device->device->vnr);
  761. dentry = debugfs_create_dir(vnr_buf, conn_dir);
  762. if (IS_ERR_OR_NULL(dentry))
  763. goto fail;
  764. peer_device->debugfs_peer_dev = dentry;
  765. return;
  766. fail:
  767. drbd_debugfs_peer_device_cleanup(peer_device);
  768. drbd_err(peer_device, "failed to create debugfs entries\n");
  769. }
  770. void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device)
  771. {
  772. drbd_debugfs_remove(&peer_device->debugfs_peer_dev);
  773. }
/*
 * seq_file show handler for the global "version" entry: one "# <buildtag>"
 * comment line followed by KEY=value lines for the release version and the
 * netlink API / wire protocol version range.
 */
static int drbd_version_show(struct seq_file *m, void *ignored)
{
	seq_printf(m, "# %s\n", drbd_buildtag());
	seq_printf(m, "VERSION=%s\n", REL_VERSION);
	seq_printf(m, "API_VERSION=%u\n", API_VERSION);
	seq_printf(m, "PRO_VERSION_MIN=%u\n", PRO_VERSION_MIN);
	seq_printf(m, "PRO_VERSION_MAX=%u\n", PRO_VERSION_MAX);
	return 0;
}
/*
 * Open handler for the "version" file.  Plain single_open() with no
 * private data: unlike the per-device files above, nothing here needs
 * a reference pinned across the open.
 */
static int drbd_version_open(struct inode *inode, struct file *file)
{
	return single_open(file, drbd_version_show, NULL);
}
  787. static const struct file_operations drbd_version_fops = {
  788. .owner = THIS_MODULE,
  789. .open = drbd_version_open,
  790. .llseek = seq_lseek,
  791. .read = seq_read,
  792. .release = single_release,
  793. };
  794. /* not __exit, may be indirectly called
  795. * from the module-load-failure path as well. */
  796. void drbd_debugfs_cleanup(void)
  797. {
  798. drbd_debugfs_remove(&drbd_debugfs_resources);
  799. drbd_debugfs_remove(&drbd_debugfs_minors);
  800. drbd_debugfs_remove(&drbd_debugfs_version);
  801. drbd_debugfs_remove(&drbd_debugfs_root);
  802. }
  803. int __init drbd_debugfs_init(void)
  804. {
  805. struct dentry *dentry;
  806. dentry = debugfs_create_dir("drbd", NULL);
  807. if (IS_ERR_OR_NULL(dentry))
  808. goto fail;
  809. drbd_debugfs_root = dentry;
  810. dentry = debugfs_create_file("version", 0444, drbd_debugfs_root, NULL, &drbd_version_fops);
  811. if (IS_ERR_OR_NULL(dentry))
  812. goto fail;
  813. drbd_debugfs_version = dentry;
  814. dentry = debugfs_create_dir("resources", drbd_debugfs_root);
  815. if (IS_ERR_OR_NULL(dentry))
  816. goto fail;
  817. drbd_debugfs_resources = dentry;
  818. dentry = debugfs_create_dir("minors", drbd_debugfs_root);
  819. if (IS_ERR_OR_NULL(dentry))
  820. goto fail;
  821. drbd_debugfs_minors = dentry;
  822. return 0;
  823. fail:
  824. drbd_debugfs_cleanup();
  825. if (dentry)
  826. return PTR_ERR(dentry);
  827. else
  828. return -EINVAL;
  829. }