
// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

extern bool bcache_is_reboot;
/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

static const char * const bch_reada_cache_policies[] = {
	"all",
	"meta-only",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(reclaimed_journal_buckets);
read_attribute(flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(readahead_cache_policy);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);
rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(gc_after_writeback);
rw_attribute(size);
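
/*
 * Render a NULL-terminated string list into buf, bracketing the selected
 * entry; reading e.g. cache_mode yields "writethrough [writeback] ...".
 */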
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += snprintf(out, buf + size - out,
				i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}
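
/*
 * SHOW()/STORE() and the sysfs_print*/sysfs_strtoul* helpers come from
 * sysfs.h: each helper expands to an "if (attr == &sysfs_<name>)" match
 * that returns early, so these functions read as one long dispatch.
 */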
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_readahead_cache_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_reada_cache_policies,
					       dc->cache_readahead_policy);

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum, "%i", dc->disk.data_csum);
	var_printf(verify, "%i");
	var_printf(bypass_torture_test, "%i");
	var_printf(writeback_metadata, "%i");
	var_printf(writeback_running, "%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit, "%i", dc->error_limit);
	sysfs_printf(io_disable, "%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive, "%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running, atomic_read(&dc->running));
	sysfs_print(state, states[BDEV_STATE(&dc->sb)]);
	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		/* terminator belongs at index SB_LABEL_SIZE, not one past it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}
	if (attr == &sysfs_backing_dev_name) {
		snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_uuid) {
		/* convert binary uuid into 36-byte string plus '\0' */
		snprintf(buf, 36 + 1, "%pU", dc->sb.uuid);
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
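
/*
 * Store path for per-backing-device attributes. Each sysfs_strtoul*
 * helper returns early when attr matches, so unmatched writes fall
 * through to the explicit handlers below (label, attach, detach, stop).
 * Attaching is done by writing a cache set's UUID, e.g. from the shell:
 * "echo <cset-uuid> > /sys/block/bcache0/bcache/attach".
 */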
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum, dc->disk.data_csum);
	d_strtoul(verify);
	sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
	sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
	sysfs_strtoul_bool(writeback_running, dc->writeback_running);
	sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
			    0, bch_cutoff_writeback);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_minimum,
			    dc->writeback_rate_minimum,
			    1, UINT_MAX);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf)) {
		v = bch_cached_dev_run(dc);
		if (v)
			return v;
	}

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_readahead_cache_policy) {
		v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != dc->cache_readahead_policy)
			dc->cache_readahead_policy = v;
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}
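
/*
 * Locked wrapper: takes bch_register_lock around __cached_dev_store(),
 * then fixes up writeback state that may have changed under the lock.
 */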
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running) {
		/* dc->writeback_running changed in __cached_dev_store() */
		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
			/*
			 * Reject setting it to 1 via sysfs if the writeback
			 * kthread has not been created yet.
			 */
			if (dc->writeback_running) {
				dc->writeback_running = false;
				pr_err("%s: failed to run non-existent writeback thread",
				       dc->disk.disk->disk_name);
			}
		} else
			/*
			 * The writeback kthread will check whether
			 * dc->writeback_running is true or false.
			 */
			bch_writeback_queue(dc);
	}

	/*
	 * Only set BCACHE_DEV_WB_RUNNING when the cached device is attached
	 * to a cache set; otherwise it doesn't make sense.
	 */
	if (attr == &sysfs_writeback_percent)
		if ((dc->disk.c != NULL) &&
		    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_readahead_cache_policy,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_io_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	&sysfs_backing_dev_name,
	&sysfs_backing_dev_uuid,
	NULL
};
KTYPE(bch_cached_dev);
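
/*
 * Flash-only volumes: bcache devices carved directly out of a cache
 * set's space, with no backing device behind them.
 */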
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		/* terminator belongs at index SB_LABEL_SIZE, not one past it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);
		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
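
/*
 * bset_tree_stats support: walk every btree node via
 * bch_btree_map_nodes() and accumulate per-bset statistics.
 */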
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes: %zu\n"
			"written sets: %zu\n"
			"unwritten sets: %zu\n"
			"written key bytes: %zu\n"
			"unwritten key bytes: %zu\n"
			"floats: %zu\n"
			"failed: %zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}
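
/*
 * Take a read lock on the root without holding a lock across the
 * lookup: retry until the node we locked is still the root, since it
 * can be replaced (e.g. by a split) while we wait on the lock.
 */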
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}
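
/*
 * btree_cache_size sums the pages held by cached btree nodes;
 * btree_cache_max_chain reports the longest bucket_hash collision chain.
 */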
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}
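
/*
 * Cache-set-wide attributes. sysfs_hprint() prints values with a
 * human-readable unit suffix; the "<< 9" shifts convert 512-byte
 * sectors to bytes.
 */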
SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous, CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_hprint(bucket_size, bucket_bytes(c));
	sysfs_hprint(block_size, block_bytes(c));
	sysfs_print(tree_depth, c->root->level);
	sysfs_print(root_usage_percent, bch_root_usage(c));

	sysfs_hprint(btree_cache_size, bch_cache_size(c));
	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);

	sysfs_print(btree_used_percent, bch_btree_used(c));
	sysfs_print(btree_nodes, c->gc_stats.nodes);
	sysfs_hprint(average_key_size, bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(reclaimed_journal_buckets,
		    atomic_long_read(&c->reclaimed_journal_buckets));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife, c->error_decay * 88);
	sysfs_print(io_error_limit, c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(cutoff_writeback, bch_cutoff_writeback);
	sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

	sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
	sysfs_printf(verify, "%i", c->verify);
	sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
	sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
	sysfs_printf(io_disable, "%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)
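
/*
 * Store path for cache-set attributes; runs under bch_register_lock
 * via the STORE_LOCKED() wrapper below.
 */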
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		force_wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul_clamp(congested_read_threshold_us,
			    c->congested_read_threshold_us,
			    0, UINT_MAX);
	sysfs_strtoul_clamp(congested_write_threshold_us,
			    c->congested_write_threshold_us,
			    0, UINT_MAX);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			return size;
		}

		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);

		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul_clamp(journal_delay_ms,
			    c->journal_delay_ms,
			    0, USHRT_MAX);
	sysfs_strtoul_bool(verify, c->verify);
	sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
	sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled);
	/*
	 * Writing gc_after_writeback here may overwrite an already-set
	 * BCH_DO_AUTO_GC; that is harmless, because the flag will simply
	 * be set again at the next opportunity.
	 */
	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}
static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_reclaimed_journal_buckets,
	&sysfs_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_gc_after_writeback,
	&sysfs_io_disable,
	&sysfs_cutoff_writeback,
	&sysfs_cutoff_writeback_sync,
	NULL
};
KTYPE(bch_cache_set_internal);
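
/*
 * Sort comparator for bucket priorities: descending order (note r - l).
 * cond_resched() keeps a sort over millions of buckets from hogging
 * the CPU.
 */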
static int __bch_cache_cmp(const void *l, const void *r)
{
	cond_resched();
	return *((uint16_t *)r) - *((uint16_t *)l);
}
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size, bucket_bytes(ca));
	sysfs_hprint(block_size, block_bytes(ca));
	sysfs_print(nbuckets, ca->sb.nbuckets);
	sysfs_print(discard, ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));
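
	/*
	 * priority_stats: snapshot every bucket's prio under bucket_lock,
	 * sort descending, then report bucket-state percentages, the mean
	 * "heat" (INITIAL_PRIO - prio), and 31 evenly spaced quantiles.
	 */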
	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused: %zu%%\n"
				"Clean: %zu%%\n"
				"Dirty: %zu%%\n"
				"Metadata: %zu%%\n"
				"Average: %llu\n"
				"Sectors per Q: %zu\n"
				"Quantiles: [",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)
static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);