object.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108
  1. /* FS-Cache object state machine handler
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. *
  11. * See Documentation/filesystems/caching/object.txt for a description of the
  12. * object state machine and the in-kernel representations.
  13. */
  14. #define FSCACHE_DEBUG_LEVEL COOKIE
  15. #include <linux/module.h>
  16. #include <linux/slab.h>
  17. #include <linux/prefetch.h>
  18. #include "internal.h"
/*
 * Forward declarations of the state machine's work-state handlers.  Each
 * handler is invoked by the dispatcher with the object and the number of the
 * event that caused entry (or -1), and returns the next state to transition
 * to (or NO_TRANSIT to repeat the current state).
 */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
/* Name-mangling for state objects: state n becomes symbol fscache_osm_n. */
#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))

/*
 * Define a work state.  Work states are execution states.  No event
 * processing is performed by them.  The function attached to a work state
 * returns a pointer indicating the next state to which the state machine
 * should transition.  Returning NO_TRANSIT repeats the current state, but
 * goes back to the scheduler first.
 */
#define WORK_STATE(n, sn, f) \
	const struct fscache_state __STATE_NAME(n) = { \
		.name = #n, \
		.short_name = sn, \
		.work = f \
	}

/*
 * Returns from work states.  transit_to() also prefetches the next state's
 * work pointer since the dispatcher will dereference it almost immediately.
 */
#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })

#define NO_TRANSIT ((struct fscache_state *)NULL)

/*
 * Define a wait state.  Wait states are event processing states.  No
 * execution is performed by them.  Wait states are just tables of "if event X
 * occurs, clear it and transition to state Y".  The dispatcher returns to the
 * scheduler if none of the events in which the wait state has an interest are
 * currently pending.
 */
#define WAIT_STATE(n, sn, ...) \
	const struct fscache_state __STATE_NAME(n) = { \
		.name = #n, \
		.short_name = sn, \
		.work = NULL, \
		.transitions = { __VA_ARGS__, { 0, NULL } } \
	}

/* One entry in a transition table: event bitmask -> destination state. */
#define TRANSIT_TO(state, emask) \
	{ .events = (emask), .transit_to = STATE(state) }
/*
 * The object state machine.  Work states execute a handler; the "?xxx" wait
 * states block until one of their tabled events is raised.
 */
static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object);
static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);
static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);
static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);
/* Creation shares the lookup handler. */
static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object);
static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);
static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);
static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object);
static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object);
static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);

static WAIT_STATE(WAIT_FOR_INIT, "?INI",
		  TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_PARENT, "?PRN",
		  TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY));

static WAIT_STATE(WAIT_FOR_CMD, "?CMD",
		  TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
		  TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE),
		  TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR",
		  TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED));

/*
 * Out-of-band event transition tables.  These are for handling unexpected
 * events, such as an I/O error.  If an OOB event occurs, the state machine
 * clears and disables the event and forces a transition to the nominated work
 * state (a currently executing work state will complete first).
 *
 * In such a situation, object->state remembers the state the machine should
 * have been in/gone to and returning NO_TRANSIT returns to that.
 */
static const struct fscache_transition fscache_osm_init_oob[] = {
	TRANSIT_TO(ABORT_INIT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

static const struct fscache_transition fscache_osm_lookup_oob[] = {
	TRANSIT_TO(LOOKUP_FAILURE,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

static const struct fscache_transition fscache_osm_run_oob[] = {
	TRANSIT_TO(KILL_OBJECT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};
/* Reference-counting and queue-management helpers defined below. */
static int fscache_get_object(struct fscache_object *);
static void fscache_put_object(struct fscache_object *);
static bool fscache_enqueue_dependents(struct fscache_object *, int);
static void fscache_dequeue_object(struct fscache_object *);
/*
 * we need to notify the parent when an op completes that we had outstanding
 * upon it
 */
static inline void fscache_done_parent_op(struct fscache_object *object)
{
	struct fscache_object *parent = object->parent;

	_enter("OBJ%x {OBJ%x,%x}",
	       object->debug_id, parent->debug_id, parent->n_ops);

	/* Lockdep subclass 1: the caller may already hold object->lock (see
	 * fscache_object_available()), and parent/child locks share a class.
	 */
	spin_lock_nested(&parent->lock, 1);
	parent->n_obj_ops--;
	parent->n_ops--;
	if (parent->n_ops == 0)
		/* Last outstanding op on the parent: let anyone waiting for
		 * clearance (e.g. WAIT_FOR_CLEARANCE) proceed. */
		fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
	spin_unlock(&parent->lock);
}
/*
 * Object state machine dispatcher.
 *
 * Runs the object's state machine: OOB events are checked first and force a
 * jump to their nominated work state; otherwise a wait state's transition
 * table is consulted.  Work states are executed back-to-back until one
 * returns NO_TRANSIT or the machine lands in a wait state, at which point
 * the event mask is reopened and pending events rechecked.
 */
static void fscache_object_sm_dispatcher(struct fscache_object *object)
{
	const struct fscache_transition *t;
	const struct fscache_state *state, *new_state;
	unsigned long events, event_mask;
	int event = -1;

	ASSERT(object != NULL);

	_enter("{OBJ%x,%s,%lx}",
	       object->debug_id, object->state->name, object->events);

	event_mask = object->event_mask;
restart:
	object->event_mask = 0; /* Mask normal event handling */
	state = object->state;
restart_masked:
	events = object->events;

	/* Handle any out-of-band events (typically an error) */
	if (events & object->oob_event_mask) {
		_debug("{OBJ%x} oob %lx",
		       object->debug_id, events & object->oob_event_mask);
		for (t = object->oob_table; t->events; t++) {
			if (events & t->events) {
				state = t->transit_to;
				ASSERT(state->work != NULL);
				event = fls(events & t->events) - 1;
				/* Disable and consume the event; OOB events
				 * fire at most once per arming. */
				__clear_bit(event, &object->oob_event_mask);
				clear_bit(event, &object->events);
				goto execute_work_state;
			}
		}
	}

	/* Wait states are just transition tables */
	if (!state->work) {
		if (events & event_mask) {
			for (t = state->transitions; t->events; t++) {
				if (events & t->events) {
					new_state = t->transit_to;
					event = fls(events & t->events) - 1;
					clear_bit(event, &object->events);
					_debug("{OBJ%x} ev %d: %s -> %s",
					       object->debug_id, event,
					       state->name, new_state->name);
					object->state = state = new_state;
					goto execute_work_state;
				}
			}

			/* The event mask didn't include all the tabled bits */
			BUG();
		}
		/* Randomly woke up */
		goto unmask_events;
	}

execute_work_state:
	_debug("{OBJ%x} exec %s", object->debug_id, state->name);

	new_state = state->work(object, event);
	event = -1;
	if (new_state == NO_TRANSIT) {
		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
		/* OBJECT_DEAD must never be requeued. */
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		/* Requeue so the scheduler gets a look-in before we retry. */
		fscache_enqueue_object(object);
		event_mask = object->oob_event_mask;
		goto unmask_events;
	}

	_debug("{OBJ%x} %s -> %s",
	       object->debug_id, state->name, new_state->name);
	object->state = state = new_state;

	if (state->work) {
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		goto restart_masked;
	}

	/* Transited to wait state: listen for OOB events plus everything the
	 * wait state's transition table covers. */
	event_mask = object->oob_event_mask;
	for (t = state->transitions; t->events; t++)
		event_mask |= t->events;

unmask_events:
	object->event_mask = event_mask;
	/* Make the unmasking visible before rechecking for events that were
	 * raised whilst we were masked, so no wakeup is lost. */
	smp_mb();
	events = object->events;
	if (events & event_mask)
		goto restart;
	_leave(" [msk %lx]", event_mask);
}
  232. /*
  233. * execute an object
  234. */
  235. static void fscache_object_work_func(struct work_struct *work)
  236. {
  237. struct fscache_object *object =
  238. container_of(work, struct fscache_object, work);
  239. unsigned long start;
  240. _enter("{OBJ%x}", object->debug_id);
  241. start = jiffies;
  242. fscache_object_sm_dispatcher(object);
  243. fscache_hist(fscache_objs_histogram, start);
  244. fscache_put_object(object);
  245. }
  246. /**
  247. * fscache_object_init - Initialise a cache object description
  248. * @object: Object description
  249. * @cookie: Cookie object will be attached to
  250. * @cache: Cache in which backing object will be found
  251. *
  252. * Initialise a cache object description to its basic values.
  253. *
  254. * See Documentation/filesystems/caching/backend-api.txt for a complete
  255. * description.
  256. */
  257. void fscache_object_init(struct fscache_object *object,
  258. struct fscache_cookie *cookie,
  259. struct fscache_cache *cache)
  260. {
  261. const struct fscache_transition *t;
  262. atomic_inc(&cache->object_count);
  263. object->state = STATE(WAIT_FOR_INIT);
  264. object->oob_table = fscache_osm_init_oob;
  265. object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
  266. spin_lock_init(&object->lock);
  267. INIT_LIST_HEAD(&object->cache_link);
  268. INIT_HLIST_NODE(&object->cookie_link);
  269. INIT_WORK(&object->work, fscache_object_work_func);
  270. INIT_LIST_HEAD(&object->dependents);
  271. INIT_LIST_HEAD(&object->dep_link);
  272. INIT_LIST_HEAD(&object->pending_ops);
  273. object->n_children = 0;
  274. object->n_ops = object->n_in_progress = object->n_exclusive = 0;
  275. object->events = 0;
  276. object->store_limit = 0;
  277. object->store_limit_l = 0;
  278. object->cache = cache;
  279. object->cookie = cookie;
  280. object->parent = NULL;
  281. #ifdef CONFIG_FSCACHE_OBJECT_LIST
  282. RB_CLEAR_NODE(&object->objlist_link);
  283. #endif
  284. object->oob_event_mask = 0;
  285. for (t = object->oob_table; t->events; t++)
  286. object->oob_event_mask |= t->events;
  287. object->event_mask = object->oob_event_mask;
  288. for (t = object->state->transitions; t->events; t++)
  289. object->event_mask |= t->events;
  290. }
  291. EXPORT_SYMBOL(fscache_object_init);
/*
 * Mark the object as no longer being live, making sure that we synchronise
 * against op submission.
 */
static inline void fscache_mark_object_dead(struct fscache_object *object)
{
	/* The lock/unlock pair orders the flag clear against anyone
	 * submitting ops under object->lock. */
	spin_lock(&object->lock);
	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
	spin_unlock(&object->lock);
}
/*
 * Abort object initialisation before we start it.
 */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	/* No further OOB diversions; we're heading for the kill path. */
	object->oob_event_mask = 0;
	/* Unhook from the parent's dependents list if we were waiting on it. */
	fscache_dequeue_object(object);
	return transit_to(KILL_OBJECT);
}
/*
 * initialise an object
 * - check the specified object's parent to see if we can make use of it
 *   immediately to do a creation
 * - we may need to start the process of creating a parent and we need to wait
 *   for the parent's lookup and creation to complete if it's not there yet
 */
static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
							     int event)
{
	struct fscache_object *parent;
	bool success;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(list_empty(&object->dep_link));

	parent = object->parent;
	if (!parent) {
		_leave(" [no parent]");
		return transit_to(DROP_OBJECT);
	}

	_debug("parent: %s of:%lx", parent->state->name, parent->flags);

	if (fscache_object_is_dying(parent)) {
		_leave(" [bad parent]");
		return transit_to(DROP_OBJECT);
	}

	if (fscache_object_is_available(parent)) {
		_leave(" [ready]");
		return transit_to(PARENT_READY);
	}

	/* The parent isn't ready yet: take a ref on ourselves and hang off
	 * its dependents list so it can wake us when it becomes available.
	 * Both checks must be done under the parent's lock to avoid racing
	 * with the parent dying. */
	_debug("wait");

	spin_lock(&parent->lock);
	fscache_stat(&fscache_n_cop_grab_object);
	success = false;
	if (fscache_object_is_live(parent) &&
	    object->cache->ops->grab_object(object)) {
		list_add(&object->dep_link, &parent->dependents);
		success = true;
	}
	fscache_stat_d(&fscache_n_cop_grab_object);
	spin_unlock(&parent->lock);
	if (!success) {
		_leave(" [grab failed]");
		return transit_to(DROP_OBJECT);
	}

	/* fscache_acquire_non_index_cookie() uses this
	 * to wake the chain up */
	fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
	_leave(" [wait]");
	return transit_to(WAIT_FOR_PARENT);
}
/*
 * Once the parent object is ready, we should kick off our lookup op.
 */
static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
							int event)
{
	struct fscache_object *parent = object->parent;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(parent != NULL);

	/* Take an op count on the parent; this pins it until
	 * fscache_done_parent_op() releases it after our lookup. */
	spin_lock(&parent->lock);
	parent->n_ops++;
	parent->n_obj_ops++;
	object->lookup_jif = jiffies;	/* timestamp for the instantiate histogram */
	spin_unlock(&parent->lock);

	_leave("");
	return transit_to(LOOK_UP_OBJECT);
}
/*
 * look an object up in the cache from which it was allocated
 * - we hold an "access lock" on the parent object, so the parent object cannot
 *   be withdrawn by either party till we've finished
 */
static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
							  int event)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_object *parent = object->parent;
	int ret;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* Errors from here on divert to LOOKUP_FAILURE rather than ABORT. */
	object->oob_table = fscache_osm_lookup_oob;

	ASSERT(parent != NULL);
	ASSERTCMP(parent->n_ops, >, 0);
	ASSERTCMP(parent->n_obj_ops, >, 0);

	/* make sure the parent is still available */
	ASSERT(fscache_object_is_available(parent));

	if (fscache_object_is_dying(parent) ||
	    test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
	    !fscache_use_cookie(object)) {
		_leave(" [unavailable]");
		return transit_to(LOOKUP_FAILURE);
	}

	_debug("LOOKUP \"%s\" in \"%s\"",
	       cookie->def->name, object->cache->tag->name);

	fscache_stat(&fscache_n_object_lookups);
	fscache_stat(&fscache_n_cop_lookup_object);
	ret = object->cache->ops->lookup_object(object);
	fscache_stat_d(&fscache_n_cop_lookup_object);

	fscache_unuse_cookie(object);

	if (ret == -ETIMEDOUT) {
		/* probably stuck behind another object, so move this one to
		 * the back of the queue */
		fscache_stat(&fscache_n_object_lookups_timed_out);
		_leave(" [timeout]");
		return NO_TRANSIT;
	}

	if (ret < 0) {
		_leave(" [error]");
		return transit_to(LOOKUP_FAILURE);
	}

	_leave(" [ok]");
	return transit_to(OBJECT_AVAILABLE);
}
/**
 * fscache_object_lookup_negative - Note negative cookie lookup
 * @object: Object pointing to cookie to mark
 *
 * Note negative lookup, permitting those waiting to read data from an already
 * existing backing object to continue as there's no data for them to read.
 */
void fscache_object_lookup_negative(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	/* Only the first lookup result (negative or positive) does the
	 * cookie-flag update and wakeup. */
	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_negative);

		/* Allow write requests to begin stacking up and read requests to begin
		 * returning ENODATA.
		 */
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		_debug("wake up lookup %p", &cookie->flags);
		/* _unlock variant provides the release ordering before waking
		 * waiters on the LOOKING_UP bit. */
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	}
	_leave("");
}
EXPORT_SYMBOL(fscache_object_lookup_negative);
/**
 * fscache_obtained_object - Note successful object lookup or creation
 * @object: Object pointing to cookie to mark
 *
 * Note successful lookup and/or creation, permitting those waiting to write
 * data to a backing object to continue.
 *
 * Note that after calling this, an object's cookie may be relinquished by the
 * netfs, and so must be accessed with object lock held.
 */
void fscache_obtained_object(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	/* if we were still looking up, then we must have a positive lookup
	 * result, in which case there may be data available */
	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_positive);

		/* We do (presumably) have data */
		clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		/* Allow write requests to begin stacking up and read requests
		 * to begin shovelling data.
		 */
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	} else {
		/* LOOKED_UP was already set, so this was a creation. */
		fscache_stat(&fscache_n_object_created);
	}

	set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
	_leave("");
}
EXPORT_SYMBOL(fscache_obtained_object);
/*
 * handle an object that has just become available
 */
static const struct fscache_state *fscache_object_available(struct fscache_object *object,
							    int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	/* From here on, errors divert to KILL_OBJECT. */
	object->oob_table = fscache_osm_run_oob;

	spin_lock(&object->lock);

	/* Release the op count held on the parent since PARENT_READY.  Note
	 * fscache_done_parent_op() nests the parent lock inside ours. */
	fscache_done_parent_op(object);
	if (object->n_in_progress == 0) {
		if (object->n_ops > 0) {
			ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
			/* Kick off ops queued up whilst we were looking up. */
			fscache_start_operations(object);
		} else {
			ASSERT(list_empty(&object->pending_ops));
		}
	}
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
	fscache_stat(&fscache_n_object_avail);

	_leave("");
	return transit_to(JUMPSTART_DEPS);
}
  509. /*
  510. * Wake up this object's dependent objects now that we've become available.
  511. */
  512. static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
  513. int event)
  514. {
  515. _enter("{OBJ%x},%d", object->debug_id, event);
  516. if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
  517. return NO_TRANSIT; /* Not finished; requeue */
  518. return transit_to(WAIT_FOR_CMD);
  519. }
/*
 * Handle lookup or creation failure.
 */
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
							  int event)
{
	struct fscache_cookie *cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* No further OOB diversions; we're already on the failure path. */
	object->oob_event_mask = 0;

	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);

	/* Mark the cookie unavailable and wake anyone waiting on the lookup. */
	cookie = object->cookie;
	set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

	/* Release the op count held on the parent since PARENT_READY. */
	fscache_done_parent_op(object);
	return transit_to(KILL_OBJECT);
}
/*
 * Wait for completion of all active operations on this object and the death of
 * all child objects of this object.
 */
static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
						       int event)
{
	_enter("{OBJ%x,%d,%d},%d",
	       object->debug_id, object->n_ops, object->n_children, event);

	fscache_mark_object_dead(object);
	object->oob_event_mask = 0;

	if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
		/* Reject any new read/write ops and abort any that are pending. */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		fscache_cancel_all_ops(object);
	}

	/* Nothing left to wait for: drop straight away. */
	if (list_empty(&object->dependents) &&
	    object->n_ops == 0 &&
	    object->n_children == 0)
		return transit_to(DROP_OBJECT);

	/* Let queued ops drain so n_ops can reach zero; recheck
	 * n_in_progress under the lock before starting them. */
	if (object->n_in_progress == 0) {
		spin_lock(&object->lock);
		if (object->n_ops > 0 && object->n_in_progress == 0)
			fscache_start_operations(object);
		spin_unlock(&object->lock);
	}

	if (!list_empty(&object->dependents))
		return transit_to(KILL_DEPENDENTS);

	/* Wait for EV_CLEARED from the last op/child going away. */
	return transit_to(WAIT_FOR_CLEARANCE);
}
  570. /*
  571. * Kill dependent objects.
  572. */
  573. static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
  574. int event)
  575. {
  576. _enter("{OBJ%x},%d", object->debug_id, event);
  577. if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
  578. return NO_TRANSIT; /* Not finished */
  579. return transit_to(WAIT_FOR_CLEARANCE);
  580. }
/*
 * Drop an object's attachments
 */
static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
						       int event)
{
	struct fscache_object *parent = object->parent;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_cache *cache = object->cache;
	bool awaken = false;

	_enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);

	ASSERT(cookie != NULL);
	ASSERT(!hlist_unhashed(&object->cookie_link));

	/* Make sure the cookie no longer points here and that the netfs isn't
	 * waiting for us.
	 */
	spin_lock(&cookie->lock);
	hlist_del_init(&object->cookie_link);
	if (hlist_empty(&cookie->backing_objects) &&
	    test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		awaken = true;
	spin_unlock(&cookie->lock);

	if (awaken)
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);

	/* Prevent a race with our last child, which has to signal EV_CLEARED
	 * before dropping our spinlock.
	 */
	spin_lock(&object->lock);
	spin_unlock(&object->lock);

	/* Discard from the cache's collection of objects */
	spin_lock(&cache->object_list_lock);
	list_del_init(&object->cache_link);
	spin_unlock(&cache->object_list_lock);

	fscache_stat(&fscache_n_cop_drop_object);
	cache->ops->drop_object(object);
	fscache_stat_d(&fscache_n_cop_drop_object);

	/* The parent object wants to know when all its dependents have gone */
	if (parent) {
		_debug("release parent OBJ%x {%d}",
		       parent->debug_id, parent->n_children);

		spin_lock(&parent->lock);
		parent->n_children--;
		if (parent->n_children == 0)
			fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
		spin_unlock(&parent->lock);
		object->parent = NULL;
	}

	/* this just shifts the object release to the work processor */
	fscache_put_object(object);
	fscache_stat(&fscache_n_object_dead);

	_leave("");
	return transit_to(OBJECT_DEAD);
}
  634. /*
  635. * get a ref on an object
  636. */
  637. static int fscache_get_object(struct fscache_object *object)
  638. {
  639. int ret;
  640. fscache_stat(&fscache_n_cop_grab_object);
  641. ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
  642. fscache_stat_d(&fscache_n_cop_grab_object);
  643. return ret;
  644. }
/*
 * Discard a ref on an object
 */
static void fscache_put_object(struct fscache_object *object)
{
	fscache_stat(&fscache_n_cop_put_object);
	/* The backend owns the refcount and may free the object here. */
	object->cache->ops->put_object(object);
	fscache_stat_d(&fscache_n_cop_put_object);
}
/**
 * fscache_object_destroy - Note that a cache object is about to be destroyed
 * @object: The object to be destroyed
 *
 * Note the imminent destruction and deallocation of a cache object record.
 */
void fscache_object_destroy(struct fscache_object *object)
{
	fscache_objlist_remove(object);

	/* We can get rid of the cookie now */
	fscache_cookie_put(object->cookie);
	object->cookie = NULL;	/* guard against use-after-put */
}
EXPORT_SYMBOL(fscache_object_destroy);
/*
 * enqueue an object for metadata-type processing
 */
void fscache_enqueue_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	/* Hold a ref across the queued work; dropped by the work function. */
	if (fscache_get_object(object) >= 0) {
		wait_queue_head_t *cong_wq =
			&get_cpu_var(fscache_object_cong_wait);

		if (queue_work(fscache_object_wq, &object->work)) {
			if (fscache_object_congested())
				wake_up(cong_wq);
		} else
			/* Already queued: drop the ref we just took. */
			fscache_put_object(object);

		put_cpu_var(fscache_object_cong_wait);
	}
}
/**
 * fscache_object_sleep_till_congested - Sleep until object wq is congested
 * @timeoutp: Scheduler sleep timeout
 *
 * Allow an object handler to sleep until the object workqueue is congested.
 *
 * The caller must set up a wake up event before calling this and must have set
 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
 * condition before calling this function as no test is made here.
 *
 * %true is returned if the object wq is congested, %false otherwise.
 */
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
	wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
	DEFINE_WAIT(wait);

	if (fscache_object_congested())
		return true;

	/* Queue ourselves before rechecking so a wakeup between the check
	 * and the sleep isn't lost. */
	add_wait_queue_exclusive(cong_wq, &wait);
	if (!fscache_object_congested())
		*timeoutp = schedule_timeout(*timeoutp);
	finish_wait(cong_wq, &wait);

	return fscache_object_congested();
}
EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
  710. /*
  711. * Enqueue the dependents of an object for metadata-type processing.
  712. *
  713. * If we don't manage to finish the list before the scheduler wants to run
  714. * again then return false immediately. We return true if the list was
  715. * cleared.
  716. */
  717. static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
  718. {
  719. struct fscache_object *dep;
  720. bool ret = true;
  721. _enter("{OBJ%x}", object->debug_id);
  722. if (list_empty(&object->dependents))
  723. return true;
  724. spin_lock(&object->lock);
  725. while (!list_empty(&object->dependents)) {
  726. dep = list_entry(object->dependents.next,
  727. struct fscache_object, dep_link);
  728. list_del_init(&dep->dep_link);
  729. fscache_raise_event(dep, event);
  730. fscache_put_object(dep);
  731. if (!list_empty(&object->dependents) && need_resched()) {
  732. ret = false;
  733. break;
  734. }
  735. }
  736. spin_unlock(&object->lock);
  737. return ret;
  738. }
  739. /*
  740. * remove an object from whatever queue it's waiting on
  741. */
  742. static void fscache_dequeue_object(struct fscache_object *object)
  743. {
  744. _enter("{OBJ%x}", object->debug_id);
  745. if (!list_empty(&object->dep_link)) {
  746. spin_lock(&object->parent->lock);
  747. list_del_init(&object->dep_link);
  748. spin_unlock(&object->parent->lock);
  749. }
  750. _leave("");
  751. }
  752. /**
  753. * fscache_check_aux - Ask the netfs whether an object on disk is still valid
  754. * @object: The object to ask about
  755. * @data: The auxiliary data for the object
  756. * @datalen: The size of the auxiliary data
  757. *
  758. * This function consults the netfs about the coherency state of an object.
  759. * The caller must be holding a ref on cookie->n_active (held by
  760. * fscache_look_up_object() on behalf of the cache backend during object lookup
  761. * and creation).
  762. */
  763. enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
  764. const void *data, uint16_t datalen)
  765. {
  766. enum fscache_checkaux result;
  767. if (!object->cookie->def->check_aux) {
  768. fscache_stat(&fscache_n_checkaux_none);
  769. return FSCACHE_CHECKAUX_OKAY;
  770. }
  771. result = object->cookie->def->check_aux(object->cookie->netfs_data,
  772. data, datalen);
  773. switch (result) {
  774. /* entry okay as is */
  775. case FSCACHE_CHECKAUX_OKAY:
  776. fscache_stat(&fscache_n_checkaux_okay);
  777. break;
  778. /* entry requires update */
  779. case FSCACHE_CHECKAUX_NEEDS_UPDATE:
  780. fscache_stat(&fscache_n_checkaux_update);
  781. break;
  782. /* entry requires deletion */
  783. case FSCACHE_CHECKAUX_OBSOLETE:
  784. fscache_stat(&fscache_n_checkaux_obsolete);
  785. break;
  786. default:
  787. BUG();
  788. }
  789. return result;
  790. }
  791. EXPORT_SYMBOL(fscache_check_aux);
/*
 * Asynchronously invalidate an object by cancelling pending I/O on it and
 * submitting an exclusive invalidation op to the cache backend.  Returns the
 * next state for the object state machine.
 */
static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
							      int event)
{
	struct fscache_operation *op;
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* We're going to need the cookie.  If the cookie is not available then
	 * retire the object instead.
	 */
	if (!fscache_use_cookie(object)) {
		ASSERT(object->cookie->stores.rnode == NULL);
		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
		_leave(" [no cookie]");
		return transit_to(KILL_OBJECT);
	}

	/* Reject any new read/write ops and abort any that are pending. */
	fscache_invalidate_writes(cookie);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	fscache_cancel_all_ops(object);

	/* Now we have to wait for in-progress reads and writes */
	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		goto nomem;

	/* Exclusive op: it queues behind, and blocks, all other ops on the
	 * object; FSCACHE_OP_UNUSE_COOKIE hands our cookie usage count to the
	 * op (released when the op completes).
	 */
	fscache_operation_init(op, object->cache->ops->invalidate_object,
			       NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto submit_op_failed;
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);

	/* Once we've completed the invalidation, we know there will be no data
	 * stored in the cache and thus we can reinstate the data-check-skip
	 * optimisation.
	 */
	set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* We can allow read and write requests to come in once again.  They'll
	 * queue up behind our exclusive invalidation operation.
	 */
	if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	_leave(" [ok]");
	return transit_to(UPDATE_OBJECT);

nomem:
	/* Allocation failure: mark the object dead and drop the cookie usage
	 * we took above (the op never existed to do it for us).
	 */
	fscache_mark_object_dead(object);
	fscache_unuse_cookie(object);
	_leave(" [ENOMEM]");
	return transit_to(KILL_OBJECT);

submit_op_failed:
	/* Submission failure: still holding cookie->lock here; the op was
	 * never queued, so free it and drop the cookie usage ourselves.
	 */
	fscache_mark_object_dead(object);
	spin_unlock(&cookie->lock);
	fscache_unuse_cookie(object);
	kfree(op);
	_leave(" [EIO]");
	return transit_to(KILL_OBJECT);
}
  853. static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
  854. int event)
  855. {
  856. const struct fscache_state *s;
  857. fscache_stat(&fscache_n_invalidates_run);
  858. fscache_stat(&fscache_n_cop_invalidate_object);
  859. s = _fscache_invalidate_object(object, event);
  860. fscache_stat_d(&fscache_n_cop_invalidate_object);
  861. return s;
  862. }
/*
 * Asynchronously update an object by handing it to the cache backend's
 * update_object op.  Always transitions back to WAIT_FOR_CMD.
 */
static const struct fscache_state *fscache_update_object(struct fscache_object *object,
							 int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	fscache_stat(&fscache_n_updates_run);
	/* The cop stat pair brackets the backend call so that in-progress
	 * update_object operations are accounted (fscache_stat_d presumably
	 * decrements the counter — naming convention, not visible here).
	 */
	fscache_stat(&fscache_n_cop_update_object);
	object->cache->ops->update_object(object);
	fscache_stat_d(&fscache_n_cop_update_object);
	_leave("");
	return transit_to(WAIT_FOR_CMD);
}
  877. /**
  878. * fscache_object_retrying_stale - Note retrying stale object
  879. * @object: The object that will be retried
  880. *
  881. * Note that an object lookup found an on-disk object that was adjudged to be
  882. * stale and has been deleted. The lookup will be retried.
  883. */
  884. void fscache_object_retrying_stale(struct fscache_object *object)
  885. {
  886. fscache_stat(&fscache_n_cache_no_space_reject);
  887. }
  888. EXPORT_SYMBOL(fscache_object_retrying_stale);
/**
 * fscache_object_mark_killed - Note that an object was killed
 * @object: The object that was culled
 * @why: The reason the object was killed.
 *
 * Note that an object was killed.  If the object had already been marked as
 * killed by the cache, an error is logged and nothing further is done;
 * otherwise the statistic corresponding to @why is bumped.  (The function
 * returns nothing — a previous comment claiming a bool return was wrong.)
 */
void fscache_object_mark_killed(struct fscache_object *object,
				enum fscache_why_object_killed why)
{
	/* Only the first kill is counted; a second kill indicates a problem
	 * in the cache backend.
	 */
	if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
		pr_err("Error: Object already killed by cache [%s]\n",
		       object->cache->identifier);
		return;
	}

	/* Account the kill against the reason-specific statistic. */
	switch (why) {
	case FSCACHE_OBJECT_NO_SPACE:
		fscache_stat(&fscache_n_cache_no_space_reject);
		break;
	case FSCACHE_OBJECT_IS_STALE:
		fscache_stat(&fscache_n_cache_stale_objects);
		break;
	case FSCACHE_OBJECT_WAS_RETIRED:
		fscache_stat(&fscache_n_cache_retired_objects);
		break;
	case FSCACHE_OBJECT_WAS_CULLED:
		fscache_stat(&fscache_n_cache_culled_objects);
		break;
	}
}
EXPORT_SYMBOL(fscache_object_mark_killed);
  921. /*
  922. * The object is dead. We can get here if an object gets queued by an event
  923. * that would lead to its death (such as EV_KILL) when the dispatcher is
  924. * already running (and so can be requeued) but hasn't yet cleared the event
  925. * mask.
  926. */
  927. static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
  928. int event)
  929. {
  930. if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
  931. &object->flags))
  932. return NO_TRANSIT;
  933. WARN(true, "FS-Cache object redispatched after death");
  934. return NO_TRANSIT;
  935. }