operation.c
/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);
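
/*
 * Summary of the operation life cycle as implemented below: an operation
 * moves through FSCACHE_OP_ST_INITIALISED -> PENDING -> IN_PROGRESS ->
 * COMPLETE or CANCELLED -> DEAD.  Submission (fscache_submit_op() or
 * fscache_submit_exclusive_op()) takes it to PENDING, or straight to
 * IN_PROGRESS via fscache_run_op(); fscache_op_complete() or
 * fscache_cancel_op() finishes it; the final fscache_put_operation() marks
 * it DEAD and frees it.
 */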

static void fscache_operation_dummy_cancel(struct fscache_operation *op)
{
}

/**
 * fscache_operation_init - Do basic initialisation of an operation
 * @op: The operation to initialise
 * @processor: The function to run the operation (may be NULL)
 * @cancel: The cancellation handler to assign (may be NULL)
 * @release: The release function to assign
 *
 * Do basic initialisation of an operation.  The caller must still set flags
 * and the object as needed.
 */
void fscache_operation_init(struct fscache_operation *op,
                            fscache_operation_processor_t processor,
                            fscache_operation_cancel_t cancel,
                            fscache_operation_release_t release)
{
        INIT_WORK(&op->work, fscache_op_work_func);
        atomic_set(&op->usage, 1);
        op->state = FSCACHE_OP_ST_INITIALISED;
        op->debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->processor = processor;
        op->cancel = cancel ?: fscache_operation_dummy_cancel;
        op->release = release;
        INIT_LIST_HEAD(&op->pend_link);
        fscache_stat(&fscache_n_op_initialised);
}
EXPORT_SYMBOL(fscache_operation_init);
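
/*
 * A minimal usage sketch (hypothetical caller; "my_processor" and
 * "my_release" are made-up names, not part of this file): a caller embeds a
 * struct fscache_operation in its own record, initialises it, sets the op
 * type and then submits it against an object:
 *
 *	fscache_operation_init(op, my_processor, NULL, my_release);
 *	op->flags = FSCACHE_OP_ASYNC;
 *	if (fscache_submit_op(object, op) < 0)
 *		fscache_put_operation(op);	// submission refused, e.g. -ENOBUFS
 */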

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the operation.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
        _enter("{OBJ%x OP%x,%u}",
               op->object->debug_id, op->debug_id, atomic_read(&op->usage));

        ASSERT(list_empty(&op->pend_link));
        ASSERT(op->processor != NULL);
        ASSERT(fscache_object_is_available(op->object));
        ASSERTCMP(atomic_read(&op->usage), >, 0);
        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);

        fscache_stat(&fscache_n_op_enqueue);
        switch (op->flags & FSCACHE_OP_TYPE) {
        case FSCACHE_OP_ASYNC:
                _debug("queue async");
                atomic_inc(&op->usage);
                if (!queue_work(fscache_op_wq, &op->work))
                        fscache_put_operation(op);
                break;
        case FSCACHE_OP_MYTHREAD:
                _debug("queue for caller's attention");
                break;
        default:
                pr_err("Unexpected op type %lx", op->flags);
                BUG();
                break;
        }
}
EXPORT_SYMBOL(fscache_enqueue_operation);
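
/*
 * Minimal sketch of the two op types dispatched above (hypothetical caller
 * fragments, not from this file):
 *
 *	// FSCACHE_OP_ASYNC: the thread pool runs op->processor.
 *	op->flags = FSCACHE_OP_ASYNC;
 *
 *	// FSCACHE_OP_MYTHREAD: the submitting thread drives the op itself,
 *	// typically sleeping until fscache_run_op() clears FSCACHE_OP_WAITING:
 *	op->flags = FSCACHE_OP_MYTHREAD;
 *	__set_bit(FSCACHE_OP_WAITING, &op->flags);
 *	...
 *	wait_on_bit(&op->flags, FSCACHE_OP_WAITING, TASK_UNINTERRUPTIBLE);
 */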

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
                           struct fscache_operation *op)
{
        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

        op->state = FSCACHE_OP_ST_IN_PROGRESS;
        object->n_in_progress++;
        if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
                wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
        if (op->processor)
                fscache_enqueue_operation(op);
        fscache_stat(&fscache_n_op_run);
}

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
                                                 struct fscache_operation *op,
                                                 const struct fscache_state *ostate)
{
        static bool once_only;
        struct fscache_operation *p;
        unsigned n;

        if (once_only)
                return;
        once_only = true;

        kdebug("unexpected submission OP%x [OBJ%x %s]",
               op->debug_id, object->debug_id, object->state->name);
        kdebug("objstate=%s [%s]", object->state->name, ostate->name);
        kdebug("objflags=%lx", object->flags);
        kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
        kdebug("ops=%u inp=%u exc=%u",
               object->n_ops, object->n_in_progress, object->n_exclusive);

        if (!list_empty(&object->pending_ops)) {
                n = 0;
                list_for_each_entry(p, &object->pending_ops, pend_link) {
                        ASSERTCMP(p->object, ==, object);
                        kdebug("%p %p", p->processor, p->release);
                        n++;
                }
                kdebug("n=%u", n);
        }

        dump_stack();
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
                                struct fscache_operation *op)
{
        const struct fscache_state *ostate;
        unsigned long flags;
        int ret;

        _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
        ASSERTCMP(atomic_read(&op->usage), >, 0);

        spin_lock(&object->lock);
        ASSERTCMP(object->n_ops, >=, object->n_in_progress);
        ASSERTCMP(object->n_ops, >=, object->n_exclusive);
        ASSERT(list_empty(&op->pend_link));

        ostate = object->state;
        smp_rmb();

        op->state = FSCACHE_OP_ST_PENDING;
        flags = READ_ONCE(object->flags);
        if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
                fscache_stat(&fscache_n_op_rejected);
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
        } else if (unlikely(fscache_cache_is_broken(object))) {
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -EIO;
        } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
                op->object = object;
                object->n_ops++;
                object->n_exclusive++;  /* reads and writes must wait */

                if (object->n_in_progress > 0) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                } else if (!list_empty(&object->pending_ops)) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                        fscache_start_operations(object);
                } else {
                        ASSERTCMP(object->n_in_progress, ==, 0);
                        fscache_run_op(object, op);
                }

                /* need to issue a new write op after this */
                clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
                ret = 0;
        } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
                op->object = object;
                object->n_ops++;
                object->n_exclusive++;  /* reads and writes must wait */
                atomic_inc(&op->usage);
                list_add_tail(&op->pend_link, &object->pending_ops);
                fscache_stat(&fscache_n_op_pend);
                ret = 0;
        } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
        } else {
                fscache_report_unexpected_submission(object, op, ostate);
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
        }

        spin_unlock(&object->lock);
        return ret;
}
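
/*
 * Hypothetical sketch of an exclusive submission (not from this file):
 * exclusive ops, such as attribute-change or invalidation style ops, queue
 * behind all in-progress ops and exclude everything else while they run:
 *
 *	fscache_operation_init(op, my_exclusive_processor, NULL, my_release);
 *	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
 *	if (fscache_submit_exclusive_op(object, op) < 0)
 *		fscache_put_operation(op);
 */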

/*
 * submit an operation for an object
 * - operations may be submitted only in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
                      struct fscache_operation *op)
{
        const struct fscache_state *ostate;
        unsigned long flags;
        int ret;

        _enter("{OBJ%x OP%x},{%u}",
               object->debug_id, op->debug_id, atomic_read(&op->usage));

        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
        ASSERTCMP(atomic_read(&op->usage), >, 0);

        spin_lock(&object->lock);
        ASSERTCMP(object->n_ops, >=, object->n_in_progress);
        ASSERTCMP(object->n_ops, >=, object->n_exclusive);
        ASSERT(list_empty(&op->pend_link));

        ostate = object->state;
        smp_rmb();

        op->state = FSCACHE_OP_ST_PENDING;
        flags = READ_ONCE(object->flags);
        if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
                fscache_stat(&fscache_n_op_rejected);
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
        } else if (unlikely(fscache_cache_is_broken(object))) {
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -EIO;
        } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
                op->object = object;
                object->n_ops++;

                if (object->n_exclusive > 0) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                } else if (!list_empty(&object->pending_ops)) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                        fscache_start_operations(object);
                } else {
                        ASSERTCMP(object->n_exclusive, ==, 0);
                        fscache_run_op(object, op);
                }
                ret = 0;
        } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
                op->object = object;
                object->n_ops++;
                atomic_inc(&op->usage);
                list_add_tail(&op->pend_link, &object->pending_ops);
                fscache_stat(&fscache_n_op_pend);
                ret = 0;
        } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
        } else {
                fscache_report_unexpected_submission(object, op, ostate);
                ASSERT(!fscache_object_is_active(object));
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
        }

        spin_unlock(&object->lock);
        return ret;
}
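
/*
 * A minimal caller-side sketch of the submit-and-handle-failure pattern
 * implied above (hypothetical fragment; "op" is assumed to have been set up
 * with fscache_operation_init() and had its flags set):
 *
 *	spin_lock(&cookie->lock);
 *	object = hlist_entry(cookie->backing_objects.first,
 *			     struct fscache_object, cookie_link);
 *	ret = fscache_submit_op(object, op);
 *	spin_unlock(&cookie->lock);
 *	if (ret < 0) {
 *		// -ENOBUFS: object not live; -EIO: the cache is broken
 *		fscache_put_operation(op);
 *	}
 */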

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
        _enter("{OBJ%x}", object->debug_id);

        fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
        struct fscache_operation *op;
        bool stop = false;

        while (!list_empty(&object->pending_ops) && !stop) {
                op = list_entry(object->pending_ops.next,
                                struct fscache_operation, pend_link);

                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
                        if (object->n_in_progress > 0)
                                break;
                        stop = true;
                }
                list_del_init(&op->pend_link);
                fscache_run_op(object, op);

                /* the pending queue was holding a ref on the op */
                fscache_put_operation(op);
        }

        ASSERTCMP(object->n_in_progress, <=, object->n_ops);

        _debug("woke %d ops on OBJ%x",
               object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op,
                      bool cancel_in_progress_op)
{
        struct fscache_object *object = op->object;
        bool put = false;
        int ret;

        _enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

        ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
        ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
        ASSERTCMP(atomic_read(&op->usage), >, 0);

        spin_lock(&object->lock);

        ret = -EBUSY;
        if (op->state == FSCACHE_OP_ST_PENDING) {
                ASSERT(!list_empty(&op->pend_link));
                list_del_init(&op->pend_link);
                put = true;

                fscache_stat(&fscache_n_op_cancelled);
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
                        object->n_exclusive--;
                if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
                        wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
                ret = 0;
        } else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
                ASSERTCMP(object->n_in_progress, >, 0);
                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
                        object->n_exclusive--;
                object->n_in_progress--;
                if (object->n_in_progress == 0)
                        fscache_start_operations(object);

                fscache_stat(&fscache_n_op_cancelled);
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
                        object->n_exclusive--;
                if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
                        wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
                ret = 0;
        }

        if (put)
                fscache_put_operation(op);
        spin_unlock(&object->lock);
        _leave(" = %d", ret);
        return ret;
}

/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
        struct fscache_operation *op;

        _enter("OBJ%x", object->debug_id);

        spin_lock(&object->lock);

        while (!list_empty(&object->pending_ops)) {
                op = list_entry(object->pending_ops.next,
                                struct fscache_operation, pend_link);
                fscache_stat(&fscache_n_op_cancelled);
                list_del_init(&op->pend_link);

                ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;

                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
                        object->n_exclusive--;
                if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
                        wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
                fscache_put_operation(op);
                cond_resched_lock(&object->lock);
        }

        spin_unlock(&object->lock);
        _leave("");
}

/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
        struct fscache_object *object = op->object;

        _enter("OBJ%x", object->debug_id);

        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
        ASSERTCMP(object->n_in_progress, >, 0);
        ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
                    object->n_exclusive, >, 0);
        ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
                    object->n_in_progress, ==, 1);

        spin_lock(&object->lock);

        if (!cancelled) {
                op->state = FSCACHE_OP_ST_COMPLETE;
        } else {
                op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
        }

        if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
                object->n_exclusive--;
        object->n_in_progress--;
        if (object->n_in_progress == 0)
                fscache_start_operations(object);

        spin_unlock(&object->lock);
        _leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
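
/*
 * Minimal sketch of a processor routine pairing with fscache_op_complete()
 * (hypothetical example, not from this file): every op that reaches
 * FSCACHE_OP_ST_IN_PROGRESS must eventually be completed or cancelled so
 * that n_in_progress is released and queued ops can start.
 *
 *	static void my_op_processor(struct fscache_operation *op)	// hypothetical
 *	{
 *		int error = do_the_backend_io(op);			// hypothetical
 *
 *		// mark the op complete (or cancelled on error); for ASYNC ops
 *		// fscache_op_work_func() below drops the work-item ref afterwards
 *		fscache_op_complete(op, error < 0);
 *	}
 */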

/*
 * release a reference on an operation
 * - when the last ref is dropped, the op is released; if it was the object's
 *   last outstanding op, FSCACHE_OBJECT_EV_CLEARED is raised on the object
 */
void fscache_put_operation(struct fscache_operation *op)
{
        struct fscache_object *object;
        struct fscache_cache *cache;

        _enter("{OBJ%x OP%x,%d}",
               op->object->debug_id, op->debug_id, atomic_read(&op->usage));

        ASSERTCMP(atomic_read(&op->usage), >, 0);

        if (!atomic_dec_and_test(&op->usage))
                return;

        _debug("PUT OP");
        ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
                    op->state != FSCACHE_OP_ST_COMPLETE,
                    op->state, ==, FSCACHE_OP_ST_CANCELLED);

        fscache_stat(&fscache_n_op_release);

        if (op->release) {
                op->release(op);
                op->release = NULL;
        }
        op->state = FSCACHE_OP_ST_DEAD;

        object = op->object;
        if (likely(object)) {
                if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
                        atomic_dec(&object->n_reads);
                if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
                        fscache_unuse_cookie(object);

                /* now... we may get called with the object spinlock held, so we
                 * complete the cleanup here only if we can immediately acquire the
                 * lock, and defer it otherwise */
                if (!spin_trylock(&object->lock)) {
                        _debug("defer put");
                        fscache_stat(&fscache_n_op_deferred_release);

                        cache = object->cache;
                        spin_lock(&cache->op_gc_list_lock);
                        list_add_tail(&op->pend_link, &cache->op_gc_list);
                        spin_unlock(&cache->op_gc_list_lock);
                        schedule_work(&cache->op_gc);
                        _leave(" [defer]");
                        return;
                }

                ASSERTCMP(object->n_ops, >, 0);
                object->n_ops--;
                if (object->n_ops == 0)
                        fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

                spin_unlock(&object->lock);
        }

        kfree(op);
        _leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);
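
/*
 * Reference-counting sketch (hypothetical caller, derived from the code
 * above): fscache_operation_init() starts op->usage at 1 for the submitter;
 * queuing on object->pending_ops and queuing the ASYNC work item each take
 * an extra ref, and those paths drop their own refs.  The submitter
 * therefore only balances its own ref:
 *
 *	fscache_operation_init(op, my_processor, NULL, my_release);	// usage == 1
 *	if (fscache_submit_op(object, op) == 0)
 *		...wait for or drive the op...
 *	fscache_put_operation(op);	// drop the submitter's ref
 */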

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
        struct fscache_operation *op;
        struct fscache_object *object;
        struct fscache_cache *cache =
                container_of(work, struct fscache_cache, op_gc);
        int count = 0;

        _enter("");

        do {
                spin_lock(&cache->op_gc_list_lock);
                if (list_empty(&cache->op_gc_list)) {
                        spin_unlock(&cache->op_gc_list_lock);
                        break;
                }

                op = list_entry(cache->op_gc_list.next,
                                struct fscache_operation, pend_link);
                list_del(&op->pend_link);
                spin_unlock(&cache->op_gc_list_lock);

                object = op->object;
                spin_lock(&object->lock);

                _debug("GC DEFERRED REL OBJ%x OP%x",
                       object->debug_id, op->debug_id);
                fscache_stat(&fscache_n_op_gc);

                ASSERTCMP(atomic_read(&op->usage), ==, 0);
                ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

                ASSERTCMP(object->n_ops, >, 0);
                object->n_ops--;
                if (object->n_ops == 0)
                        fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

                spin_unlock(&object->lock);
                kfree(op);

        } while (count++ < 20);

        if (!list_empty(&cache->op_gc_list))
                schedule_work(&cache->op_gc);

        _leave("");
}

/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
        struct fscache_operation *op =
                container_of(work, struct fscache_operation, work);
        unsigned long start;

        _enter("{OBJ%x OP%x,%d}",
               op->object->debug_id, op->debug_id, atomic_read(&op->usage));

        ASSERT(op->processor != NULL);
        start = jiffies;
        op->processor(op);
        fscache_hist(fscache_ops_histogram, start);
        fscache_put_operation(op);
        _leave("");
}