object.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138
  1. /* FS-Cache object state machine handler
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. *
  11. * See Documentation/filesystems/caching/object.txt for a description of the
  12. * object state machine and the in-kernel representations.
  13. */
  14. #define FSCACHE_DEBUG_LEVEL COOKIE
  15. #include <linux/module.h>
  16. #include <linux/slab.h>
  17. #include <linux/prefetch.h>
  18. #include "internal.h"
/* Work-state handler prototypes for the object state machine.  Each handler
 * is invoked by the dispatcher and returns the next state to transit to (or
 * NO_TRANSIT to requeue in the current state). */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
/* Mangle a state name into its variable identifier and take its address */
#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))

/*
 * Define a work state.  Work states are execution states.  No event processing
 * is performed by them.  The function attached to a work state returns a
 * pointer indicating the next state to which the state machine should
 * transition.  Returning NO_TRANSIT repeats the current state, but goes back
 * to the scheduler first.
 */
#define WORK_STATE(n, sn, f) \
	const struct fscache_state __STATE_NAME(n) = {			\
		.name = #n,						\
		.short_name = sn,					\
		.work = f						\
	}
/*
 * Returns from work states.  transit_to() prefetches the destination state's
 * work function since the dispatcher will call it next.
 */
#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })
#define NO_TRANSIT ((struct fscache_state *)NULL)

/*
 * Define a wait state.  Wait states are event processing states.  No execution
 * is performed by them.  Wait states are just tables of "if event X occurs,
 * clear it and transition to state Y".  The dispatcher returns to the
 * scheduler if none of the events in which the wait state has an interest are
 * currently pending.  The table is terminated by a { 0, NULL } sentinel.
 */
#define WAIT_STATE(n, sn, ...) \
	const struct fscache_state __STATE_NAME(n) = {			\
		.name = #n,						\
		.short_name = sn,					\
		.work = NULL,						\
		.transitions = { __VA_ARGS__, { 0, NULL } }		\
	}

/* One entry in a transition table: event bitmask -> destination state */
#define TRANSIT_TO(state, emask) \
	{ .events = (emask), .transit_to = STATE(state) }
/*
 * The object state machine.
 */
/* Work states: initialisation and lookup path */
static WORK_STATE(INIT_OBJECT,		"INIT", fscache_initialise_object);
static WORK_STATE(PARENT_READY,		"PRDY", fscache_parent_ready);
static WORK_STATE(ABORT_INIT,		"ABRT", fscache_abort_initialisation);
static WORK_STATE(LOOK_UP_OBJECT,	"LOOK", fscache_look_up_object);
/* NOTE(review): CREATE_OBJECT shares fscache_look_up_object as its handler */
static WORK_STATE(CREATE_OBJECT,	"CRTO", fscache_look_up_object);
static WORK_STATE(OBJECT_AVAILABLE,	"AVBL", fscache_object_available);
static WORK_STATE(JUMPSTART_DEPS,	"JUMP", fscache_jumpstart_dependents);

/* Work states: running-object commands */
static WORK_STATE(INVALIDATE_OBJECT,	"INVL", fscache_invalidate_object);
static WORK_STATE(UPDATE_OBJECT,	"UPDT", fscache_update_object);

/* Work states: teardown path */
static WORK_STATE(LOOKUP_FAILURE,	"LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT,		"KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS,	"KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT,		"DROP", fscache_drop_object);
static WORK_STATE(OBJECT_DEAD,		"DEAD", fscache_object_dead);

/* Wait states and the events that move them onwards */
static WAIT_STATE(WAIT_FOR_INIT,	"?INI",
		  TRANSIT_TO(INIT_OBJECT,	1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_PARENT,	"?PRN",
		  TRANSIT_TO(PARENT_READY,	1 << FSCACHE_OBJECT_EV_PARENT_READY));

static WAIT_STATE(WAIT_FOR_CMD,	"?CMD",
		  TRANSIT_TO(INVALIDATE_OBJECT,	1 << FSCACHE_OBJECT_EV_INVALIDATE),
		  TRANSIT_TO(UPDATE_OBJECT,	1 << FSCACHE_OBJECT_EV_UPDATE),
		  TRANSIT_TO(JUMPSTART_DEPS,	1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_CLEARANCE,	"?CLR",
		  TRANSIT_TO(KILL_OBJECT,	1 << FSCACHE_OBJECT_EV_CLEARED));
/*
 * Out-of-band event transition tables.  These are for handling unexpected
 * events, such as an I/O error.  If an OOB event occurs, the state machine
 * clears and disables the event and forces a transition to the nominated work
 * state (a currently executing work state will complete first).
 *
 * In such a situation, object->state remembers the state the machine should
 * have been in/gone to and returning NO_TRANSIT returns to that.
 */
/* OOB table whilst initialising: abort initialisation on error/kill */
static const struct fscache_transition fscache_osm_init_oob[] = {
	TRANSIT_TO(ABORT_INIT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

/* OOB table whilst looking up: treat error/kill as a lookup failure */
static const struct fscache_transition fscache_osm_lookup_oob[] = {
	TRANSIT_TO(LOOKUP_FAILURE,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

/* OOB table once available: error/kill kills the object */
static const struct fscache_transition fscache_osm_run_oob[] = {
	TRANSIT_TO(KILL_OBJECT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};
/* Local helper prototypes (definitions below) */
static int  fscache_get_object(struct fscache_object *,
			       enum fscache_obj_ref_trace);
static void fscache_put_object(struct fscache_object *,
			       enum fscache_obj_ref_trace);
static bool fscache_enqueue_dependents(struct fscache_object *, int);
static void fscache_dequeue_object(struct fscache_object *);
static void fscache_update_aux_data(struct fscache_object *);
/*
 * we need to notify the parent when an op completes that we had outstanding
 * upon it
 *
 * Decrements the parent's op counts taken in fscache_parent_ready() and, when
 * the parent's n_ops drops to zero, raises EV_CLEARED on it.
 */
static inline void fscache_done_parent_op(struct fscache_object *object)
{
	struct fscache_object *parent = object->parent;

	_enter("OBJ%x {OBJ%x,%x}",
	       object->debug_id, parent->debug_id, parent->n_ops);

	/* Nesting level 1: fscache_object_available() calls this whilst
	 * holding the child's lock, which is in the same lock class. */
	spin_lock_nested(&parent->lock, 1);
	parent->n_obj_ops--;
	parent->n_ops--;
	if (parent->n_ops == 0)
		fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
	spin_unlock(&parent->lock);
}
/*
 * Object state machine dispatcher.
 *
 * Drives the object through work states (calling ->work) and wait states
 * (consulting transition tables) until no event of interest remains pending
 * or the object reaches the OBJECT_DEAD state.  OOB events take priority and
 * force a one-off transition into the nominated work state.
 */
static void fscache_object_sm_dispatcher(struct fscache_object *object)
{
	const struct fscache_transition *t;
	const struct fscache_state *state, *new_state;
	unsigned long events, event_mask;
	bool oob;
	int event = -1;

	ASSERT(object != NULL);

	_enter("{OBJ%x,%s,%lx}",
	       object->debug_id, object->state->name, object->events);

	event_mask = object->event_mask;
restart:
	object->event_mask = 0; /* Mask normal event handling */
	state = object->state;
restart_masked:
	events = object->events;

	/* Handle any out-of-band events (typically an error) */
	if (events & object->oob_event_mask) {
		_debug("{OBJ%x} oob %lx",
		       object->debug_id, events & object->oob_event_mask);
		oob = true;
		for (t = object->oob_table; t->events; t++) {
			if (events & t->events) {
				state = t->transit_to;
				ASSERT(state->work != NULL);
				event = fls(events & t->events) - 1;
				/* Clear AND disable the OOB event so it
				 * fires at most once. */
				__clear_bit(event, &object->oob_event_mask);
				clear_bit(event, &object->events);
				goto execute_work_state;
			}
		}
	}
	oob = false;

	/* Wait states are just transition tables */
	if (!state->work) {
		if (events & event_mask) {
			for (t = state->transitions; t->events; t++) {
				if (events & t->events) {
					new_state = t->transit_to;
					event = fls(events & t->events) - 1;
					trace_fscache_osm(object, state,
							  true, false, event);
					clear_bit(event, &object->events);
					_debug("{OBJ%x} ev %d: %s -> %s",
					       object->debug_id, event,
					       state->name, new_state->name);
					object->state = state = new_state;
					goto execute_work_state;
				}
			}

			/* The event mask didn't include all the tabled bits */
			BUG();
		}
		/* Randomly woke up */
		goto unmask_events;
	}

execute_work_state:
	_debug("{OBJ%x} exec %s", object->debug_id, state->name);

	trace_fscache_osm(object, state, false, oob, event);
	new_state = state->work(object, event);
	event = -1;
	if (new_state == NO_TRANSIT) {
		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		/* Requeue for the scheduler rather than looping here;
		 * meanwhile only OOB events are listened for. */
		fscache_enqueue_object(object);
		event_mask = object->oob_event_mask;
		goto unmask_events;
	}

	_debug("{OBJ%x} %s -> %s",
	       object->debug_id, state->name, new_state->name);
	object->state = state = new_state;

	if (state->work) {
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		goto restart_masked;
	}

	/* Transited to wait state: listen for OOB events plus everything the
	 * new state's transition table is interested in. */
	event_mask = object->oob_event_mask;
	for (t = state->transitions; t->events; t++)
		event_mask |= t->events;

unmask_events:
	object->event_mask = event_mask;
	smp_mb(); /* Publish the unmasked event_mask before rechecking events;
		   * NOTE(review): presumably pairs with a barrier in the
		   * event-raising path - confirm against fscache_raise_event. */
	events = object->events;
	if (events & event_mask)
		goto restart;
	_leave(" [msk %lx]", event_mask);
}
  241. /*
  242. * execute an object
  243. */
  244. static void fscache_object_work_func(struct work_struct *work)
  245. {
  246. struct fscache_object *object =
  247. container_of(work, struct fscache_object, work);
  248. unsigned long start;
  249. _enter("{OBJ%x}", object->debug_id);
  250. start = jiffies;
  251. fscache_object_sm_dispatcher(object);
  252. fscache_hist(fscache_objs_histogram, start);
  253. fscache_put_object(object, fscache_obj_put_work);
  254. }
/**
 * fscache_object_init - Initialise a cache object description
 * @object: Object description
 * @cookie: Cookie object will be attached to
 * @cache: Cache in which backing object will be found
 *
 * Initialise a cache object description to its basic values.
 *
 * See Documentation/filesystems/caching/backend-api.txt for a complete
 * description.
 */
void fscache_object_init(struct fscache_object *object,
			 struct fscache_cookie *cookie,
			 struct fscache_cache *cache)
{
	const struct fscache_transition *t;

	atomic_inc(&cache->object_count);

	/* Start in WAIT_FOR_INIT with the initialisation OOB table armed */
	object->state = STATE(WAIT_FOR_INIT);
	object->oob_table = fscache_osm_init_oob;
	object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
	spin_lock_init(&object->lock);
	INIT_LIST_HEAD(&object->cache_link);
	INIT_HLIST_NODE(&object->cookie_link);
	INIT_WORK(&object->work, fscache_object_work_func);
	INIT_LIST_HEAD(&object->dependents);
	INIT_LIST_HEAD(&object->dep_link);
	INIT_LIST_HEAD(&object->pending_ops);
	object->n_children = 0;
	object->n_ops = object->n_in_progress = object->n_exclusive = 0;
	object->events = 0;
	object->store_limit = 0;
	object->store_limit_l = 0;
	object->cache = cache;
	object->cookie = cookie;
	fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
	object->parent = NULL;
#ifdef CONFIG_FSCACHE_OBJECT_LIST
	RB_CLEAR_NODE(&object->objlist_link);
#endif

	/* The initial event mask admits the OOB events plus whatever the
	 * initial wait state's transition table is interested in. */
	object->oob_event_mask = 0;
	for (t = object->oob_table; t->events; t++)
		object->oob_event_mask |= t->events;
	object->event_mask = object->oob_event_mask;
	for (t = object->state->transitions; t->events; t++)
		object->event_mask |= t->events;
}
EXPORT_SYMBOL(fscache_object_init);
/*
 * Mark the object as no longer being live, making sure that we synchronise
 * against op submission.  Taking and dropping the object lock around the
 * flag clear ensures that anyone who observed the flag set under the lock
 * has finished before we proceed.
 */
static inline void fscache_mark_object_dead(struct fscache_object *object)
{
	spin_lock(&object->lock);
	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
	spin_unlock(&object->lock);
}
/*
 * Abort object initialisation before we start it.  Disables further OOB
 * event handling, removes the object from its parent's dependents list and
 * moves straight to killing the object.
 */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_event_mask = 0;
	fscache_dequeue_object(object);
	return transit_to(KILL_OBJECT);
}
/*
 * initialise an object
 * - check the specified object's parent to see if we can make use of it
 *   immediately to do a creation
 * - we may need to start the process of creating a parent and we need to wait
 *   for the parent's lookup and creation to complete if it's not there yet
 */
static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
							     int event)
{
	struct fscache_object *parent;
	bool success;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(list_empty(&object->dep_link));

	parent = object->parent;
	if (!parent) {
		_leave(" [no parent]");
		return transit_to(DROP_OBJECT);
	}

	_debug("parent: %s of:%lx", parent->state->name, parent->flags);

	if (fscache_object_is_dying(parent)) {
		_leave(" [bad parent]");
		return transit_to(DROP_OBJECT);
	}

	/* Parent already fully looked up: proceed immediately */
	if (fscache_object_is_available(parent)) {
		_leave(" [ready]");
		return transit_to(PARENT_READY);
	}

	/* Parent not ready yet: grab a ref on ourselves and hang off the
	 * parent's dependents list until it becomes available.  The liveness
	 * recheck under the parent's lock closes the race with the parent
	 * dying between the check above and here. */
	_debug("wait");

	spin_lock(&parent->lock);
	fscache_stat(&fscache_n_cop_grab_object);
	success = false;
	if (fscache_object_is_live(parent) &&
	    object->cache->ops->grab_object(object, fscache_obj_get_add_to_deps)) {
		list_add(&object->dep_link, &parent->dependents);
		success = true;
	}
	fscache_stat_d(&fscache_n_cop_grab_object);
	spin_unlock(&parent->lock);
	if (!success) {
		_leave(" [grab failed]");
		return transit_to(DROP_OBJECT);
	}

	/* fscache_acquire_non_index_cookie() uses this
	 * to wake the chain up */
	fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
	_leave(" [wait]");
	return transit_to(WAIT_FOR_PARENT);
}
/*
 * Once the parent object is ready, we should kick off our lookup op.
 *
 * Takes op counts on the parent (released later by fscache_done_parent_op())
 * so the parent cannot be withdrawn whilst our lookup is in flight.
 */
static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
							int event)
{
	struct fscache_object *parent = object->parent;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(parent != NULL);

	spin_lock(&parent->lock);
	parent->n_ops++;
	parent->n_obj_ops++;
	object->lookup_jif = jiffies;	/* timestamp for the lookup histogram */
	spin_unlock(&parent->lock);

	_leave("");
	return transit_to(LOOK_UP_OBJECT);
}
/*
 * look an object up in the cache from which it was allocated
 * - we hold an "access lock" on the parent object, so the parent object cannot
 *   be withdrawn by either party till we've finished
 *
 * Also serves as the handler for the CREATE_OBJECT state (same table entry).
 */
static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
							  int event)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_object *parent = object->parent;
	int ret;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* From here on, errors divert through the lookup-failure OOB table */
	object->oob_table = fscache_osm_lookup_oob;

	ASSERT(parent != NULL);
	ASSERTCMP(parent->n_ops, >, 0);
	ASSERTCMP(parent->n_obj_ops, >, 0);

	/* make sure the parent is still available */
	ASSERT(fscache_object_is_available(parent));

	if (fscache_object_is_dying(parent) ||
	    test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
	    !fscache_use_cookie(object)) {
		_leave(" [unavailable]");
		return transit_to(LOOKUP_FAILURE);
	}

	_debug("LOOKUP \"%s\" in \"%s\"",
	       cookie->def->name, object->cache->tag->name);

	fscache_stat(&fscache_n_object_lookups);
	fscache_stat(&fscache_n_cop_lookup_object);
	ret = object->cache->ops->lookup_object(object);
	fscache_stat_d(&fscache_n_cop_lookup_object);

	fscache_unuse_cookie(object);

	if (ret == -ETIMEDOUT) {
		/* probably stuck behind another object, so move this one to
		 * the back of the queue */
		fscache_stat(&fscache_n_object_lookups_timed_out);
		_leave(" [timeout]");
		return NO_TRANSIT;
	}

	if (ret < 0) {
		_leave(" [error]");
		return transit_to(LOOKUP_FAILURE);
	}

	_leave(" [ok]");
	return transit_to(OBJECT_AVAILABLE);
}
/**
 * fscache_object_lookup_negative - Note negative cookie lookup
 * @object: Object pointing to cookie to mark
 *
 * Note negative lookup, permitting those waiting to read data from an already
 * existing backing object to continue as there's no data for them to read.
 *
 * Called by the cache backend; only the first caller per object acts (the
 * IS_LOOKED_UP flag is test-and-set).
 */
void fscache_object_lookup_negative(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_negative);

		/* Allow write requests to begin stacking up and read requests to begin
		 * returning ENODATA.
		 */
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		_debug("wake up lookup %p", &cookie->flags);
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	}
	_leave("");
}
EXPORT_SYMBOL(fscache_object_lookup_negative);
/**
 * fscache_obtained_object - Note successful object lookup or creation
 * @object: Object pointing to cookie to mark
 *
 * Note successful lookup and/or creation, permitting those waiting to write
 * data to a backing object to continue.
 *
 * Note that after calling this, an object's cookie may be relinquished by the
 * netfs, and so must be accessed with object lock held.
 */
void fscache_obtained_object(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	/* if we were still looking up, then we must have a positive lookup
	 * result, in which case there may be data available */
	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_positive);

		/* We do (presumably) have data */
		clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		/* Allow write requests to begin stacking up and read requests
		 * to begin shovelling data.
		 */
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	} else {
		/* IS_LOOKED_UP was already set by a negative lookup, so this
		 * must be an object creation */
		fscache_stat(&fscache_n_object_created);
	}

	set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
	_leave("");
}
EXPORT_SYMBOL(fscache_obtained_object);
/*
 * handle an object that has just become available
 *
 * Switches to the running OOB table, releases the op count held on the
 * parent, starts any queued operations and lets the backend finish its
 * lookup bookkeeping.
 */
static const struct fscache_state *fscache_object_available(struct fscache_object *object,
							    int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_table = fscache_osm_run_oob;

	spin_lock(&object->lock);

	/* NB: called with object->lock held - fscache_done_parent_op() uses
	 * spin_lock_nested() on the parent for this reason */
	fscache_done_parent_op(object);
	if (object->n_in_progress == 0) {
		if (object->n_ops > 0) {
			ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
			fscache_start_operations(object);
		} else {
			ASSERT(list_empty(&object->pending_ops));
		}
	}
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
	fscache_stat(&fscache_n_object_avail);

	_leave("");
	return transit_to(JUMPSTART_DEPS);
}
/*
 * Wake up this object's dependent objects now that we've become available.
 * Returns NO_TRANSIT to requeue if the dependents list couldn't be cleared
 * in one pass.
 */
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
		return NO_TRANSIT; /* Not finished; requeue */
	return transit_to(WAIT_FOR_CMD);
}
/*
 * Handle lookup or creation failure.  Marks the cookie unavailable, wakes
 * anyone waiting on the lookup, releases the op count held on the parent and
 * proceeds to kill the object.
 */
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
							  int event)
{
	struct fscache_cookie *cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_event_mask = 0;

	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);

	cookie = object->cookie;
	set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

	fscache_done_parent_op(object);
	return transit_to(KILL_OBJECT);
}
/*
 * Wait for completion of all active operations on this object and the death of
 * all child objects of this object.
 */
static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
						       int event)
{
	_enter("{OBJ%x,%d,%d},%d",
	       object->debug_id, object->n_ops, object->n_children, event);

	fscache_mark_object_dead(object);
	object->oob_event_mask = 0;

	if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
		/* Reject any new read/write ops and abort any that are pending. */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		fscache_cancel_all_ops(object);
	}

	/* Nothing outstanding: drop straight away */
	if (list_empty(&object->dependents) &&
	    object->n_ops == 0 &&
	    object->n_children == 0)
		return transit_to(DROP_OBJECT);

	/* Kick any queued operations so they can run down and complete */
	if (object->n_in_progress == 0) {
		spin_lock(&object->lock);
		if (object->n_ops > 0 && object->n_in_progress == 0)
			fscache_start_operations(object);
		spin_unlock(&object->lock);
	}

	if (!list_empty(&object->dependents))
		return transit_to(KILL_DEPENDENTS);

	return transit_to(WAIT_FOR_CLEARANCE);
}
/*
 * Kill dependent objects by raising EV_KILL on each of them.  Returns
 * NO_TRANSIT to requeue if the dependents list couldn't be cleared in one
 * pass.
 */
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
							   int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
		return NO_TRANSIT; /* Not finished */
	return transit_to(WAIT_FOR_CLEARANCE);
}
/*
 * Drop an object's attachments: detach from the cookie and the cache, notify
 * the parent of our departure and release the final object reference.
 */
static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
						       int event)
{
	struct fscache_object *parent = object->parent;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_cache *cache = object->cache;
	bool awaken = false;

	_enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);

	ASSERT(cookie != NULL);
	ASSERT(!hlist_unhashed(&object->cookie_link));

	/* Flush any pending auxiliary-data update before we detach */
	if (test_bit(FSCACHE_COOKIE_AUX_UPDATED, &cookie->flags)) {
		_debug("final update");
		fscache_update_aux_data(object);
	}

	/* Make sure the cookie no longer points here and that the netfs isn't
	 * waiting for us.
	 */
	spin_lock(&cookie->lock);
	hlist_del_init(&object->cookie_link);
	if (hlist_empty(&cookie->backing_objects) &&
	    test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		awaken = true;
	spin_unlock(&cookie->lock);

	if (awaken)
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

	/* Prevent a race with our last child, which has to signal EV_CLEARED
	 * before dropping our spinlock.
	 */
	spin_lock(&object->lock);
	spin_unlock(&object->lock);

	/* Discard from the cache's collection of objects */
	spin_lock(&cache->object_list_lock);
	list_del_init(&object->cache_link);
	spin_unlock(&cache->object_list_lock);

	fscache_stat(&fscache_n_cop_drop_object);
	cache->ops->drop_object(object);
	fscache_stat_d(&fscache_n_cop_drop_object);

	/* The parent object wants to know when all its dependents have gone */
	if (parent) {
		_debug("release parent OBJ%x {%d}",
		       parent->debug_id, parent->n_children);

		spin_lock(&parent->lock);
		parent->n_children--;
		if (parent->n_children == 0)
			fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
		spin_unlock(&parent->lock);
		object->parent = NULL;
	}

	/* this just shifts the object release to the work processor */
	fscache_put_object(object, fscache_obj_put_drop_obj);
	fscache_stat(&fscache_n_object_dead);

	_leave("");
	return transit_to(OBJECT_DEAD);
}
  650. /*
  651. * get a ref on an object
  652. */
  653. static int fscache_get_object(struct fscache_object *object,
  654. enum fscache_obj_ref_trace why)
  655. {
  656. int ret;
  657. fscache_stat(&fscache_n_cop_grab_object);
  658. ret = object->cache->ops->grab_object(object, why) ? 0 : -EAGAIN;
  659. fscache_stat_d(&fscache_n_cop_grab_object);
  660. return ret;
  661. }
/*
 * Discard a ref on an object via the cache backend's put_object op, keeping
 * the op statistics balanced.
 */
static void fscache_put_object(struct fscache_object *object,
			       enum fscache_obj_ref_trace why)
{
	fscache_stat(&fscache_n_cop_put_object);
	object->cache->ops->put_object(object, why);
	fscache_stat_d(&fscache_n_cop_put_object);
}
/**
 * fscache_object_destroy - Note that a cache object is about to be destroyed
 * @object: The object to be destroyed
 *
 * Note the imminent destruction and deallocation of a cache object record.
 * Removes the object from the object list and drops its cookie reference.
 */
void fscache_object_destroy(struct fscache_object *object)
{
	fscache_objlist_remove(object);

	/* We can get rid of the cookie now */
	fscache_cookie_put(object->cookie, fscache_cookie_put_object);
	object->cookie = NULL;
}
EXPORT_SYMBOL(fscache_object_destroy);
/*
 * enqueue an object for metadata-type processing
 *
 * Takes a reference on the object for the duration of the queued work; the
 * reference is dropped immediately if the work was already queued.  Wakes the
 * per-CPU congestion waitqueue when the workqueue reports congestion.
 */
void fscache_enqueue_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	if (fscache_get_object(object, fscache_obj_get_queue) >= 0) {
		wait_queue_head_t *cong_wq =
			&get_cpu_var(fscache_object_cong_wait);

		if (queue_work(fscache_object_wq, &object->work)) {
			if (fscache_object_congested())
				wake_up(cong_wq);
		} else
			/* Already queued: drop the extra ref we just took */
			fscache_put_object(object, fscache_obj_put_queue);

		put_cpu_var(fscache_object_cong_wait);
	}
}
/**
 * fscache_object_sleep_till_congested - Sleep until object wq is congested
 * @timeoutp: Scheduler sleep timeout
 *
 * Allow an object handler to sleep until the object workqueue is congested.
 *
 * The caller must set up a wake up event before calling this and must have set
 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
 * condition before calling this function as no test is made here.
 *
 * %true is returned if the object wq is congested, %false otherwise.
 */
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
	wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
	DEFINE_WAIT(wait);

	if (fscache_object_congested())
		return true;

	/* Re-test the condition after joining the waitqueue so a wakeup
	 * raised between the first check and the enqueue is not lost.
	 */
	add_wait_queue_exclusive(cong_wq, &wait);
	if (!fscache_object_congested())
		*timeoutp = schedule_timeout(*timeoutp);
	finish_wait(cong_wq, &wait);

	return fscache_object_congested();
}
EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
  728. /*
  729. * Enqueue the dependents of an object for metadata-type processing.
  730. *
  731. * If we don't manage to finish the list before the scheduler wants to run
  732. * again then return false immediately. We return true if the list was
  733. * cleared.
  734. */
  735. static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
  736. {
  737. struct fscache_object *dep;
  738. bool ret = true;
  739. _enter("{OBJ%x}", object->debug_id);
  740. if (list_empty(&object->dependents))
  741. return true;
  742. spin_lock(&object->lock);
  743. while (!list_empty(&object->dependents)) {
  744. dep = list_entry(object->dependents.next,
  745. struct fscache_object, dep_link);
  746. list_del_init(&dep->dep_link);
  747. fscache_raise_event(dep, event);
  748. fscache_put_object(dep, fscache_obj_put_enq_dep);
  749. if (!list_empty(&object->dependents) && need_resched()) {
  750. ret = false;
  751. break;
  752. }
  753. }
  754. spin_unlock(&object->lock);
  755. return ret;
  756. }
  757. /*
  758. * remove an object from whatever queue it's waiting on
  759. */
  760. static void fscache_dequeue_object(struct fscache_object *object)
  761. {
  762. _enter("{OBJ%x}", object->debug_id);
  763. if (!list_empty(&object->dep_link)) {
  764. spin_lock(&object->parent->lock);
  765. list_del_init(&object->dep_link);
  766. spin_unlock(&object->parent->lock);
  767. }
  768. _leave("");
  769. }
  770. /**
  771. * fscache_check_aux - Ask the netfs whether an object on disk is still valid
  772. * @object: The object to ask about
  773. * @data: The auxiliary data for the object
  774. * @datalen: The size of the auxiliary data
  775. *
  776. * This function consults the netfs about the coherency state of an object.
  777. * The caller must be holding a ref on cookie->n_active (held by
  778. * fscache_look_up_object() on behalf of the cache backend during object lookup
  779. * and creation).
  780. */
  781. enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
  782. const void *data, uint16_t datalen,
  783. loff_t object_size)
  784. {
  785. enum fscache_checkaux result;
  786. if (!object->cookie->def->check_aux) {
  787. fscache_stat(&fscache_n_checkaux_none);
  788. return FSCACHE_CHECKAUX_OKAY;
  789. }
  790. result = object->cookie->def->check_aux(object->cookie->netfs_data,
  791. data, datalen, object_size);
  792. switch (result) {
  793. /* entry okay as is */
  794. case FSCACHE_CHECKAUX_OKAY:
  795. fscache_stat(&fscache_n_checkaux_okay);
  796. break;
  797. /* entry requires update */
  798. case FSCACHE_CHECKAUX_NEEDS_UPDATE:
  799. fscache_stat(&fscache_n_checkaux_update);
  800. break;
  801. /* entry requires deletion */
  802. case FSCACHE_CHECKAUX_OBSOLETE:
  803. fscache_stat(&fscache_n_checkaux_obsolete);
  804. break;
  805. default:
  806. BUG();
  807. }
  808. return result;
  809. }
  810. EXPORT_SYMBOL(fscache_check_aux);
/*
 * Asynchronously invalidate an object.
 */
static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
							      int event)
{
	struct fscache_operation *op;
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* We're going to need the cookie.  If the cookie is not available then
	 * retire the object instead.
	 */
	if (!fscache_use_cookie(object)) {
		ASSERT(radix_tree_empty(&object->cookie->stores));
		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
		_leave(" [no cookie]");
		return transit_to(KILL_OBJECT);
	}

	/* Reject any new read/write ops and abort any that are pending. */
	fscache_invalidate_writes(cookie);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	fscache_cancel_all_ops(object);

	/* Now we have to wait for in-progress reads and writes: queue an
	 * exclusive op that runs the backend's invalidate_object method once
	 * all prior ops have drained.
	 */
	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		goto nomem;

	fscache_operation_init(cookie, op, object->cache->ops->invalidate_object,
			       NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);
	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_invalidate);

	spin_lock(&cookie->lock);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto submit_op_failed;
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);

	/* Once we've completed the invalidation, we know there will be no data
	 * stored in the cache and thus we can reinstate the data-check-skip
	 * optimisation.
	 */
	set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* We can allow read and write requests to come in once again.  They'll
	 * queue up behind our exclusive invalidation operation.
	 */
	if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	_leave(" [ok]");
	return transit_to(UPDATE_OBJECT);

nomem:
	/* Couldn't allocate the op: kill the object instead. */
	fscache_mark_object_dead(object);
	fscache_unuse_cookie(object);
	_leave(" [ENOMEM]");
	return transit_to(KILL_OBJECT);

submit_op_failed:
	/* Submission failed with the cookie lock still held; unlock before
	 * freeing the never-submitted op.
	 */
	fscache_mark_object_dead(object);
	spin_unlock(&cookie->lock);
	fscache_unuse_cookie(object);
	kfree(op);
	_leave(" [EIO]");
	return transit_to(KILL_OBJECT);
}
  873. static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
  874. int event)
  875. {
  876. const struct fscache_state *s;
  877. fscache_stat(&fscache_n_invalidates_run);
  878. fscache_stat(&fscache_n_cop_invalidate_object);
  879. s = _fscache_invalidate_object(object, event);
  880. fscache_stat_d(&fscache_n_cop_invalidate_object);
  881. return s;
  882. }
  883. /*
  884. * Update auxiliary data.
  885. */
  886. static void fscache_update_aux_data(struct fscache_object *object)
  887. {
  888. fscache_stat(&fscache_n_updates_run);
  889. fscache_stat(&fscache_n_cop_update_object);
  890. object->cache->ops->update_object(object);
  891. fscache_stat_d(&fscache_n_cop_update_object);
  892. }
  893. /*
  894. * Asynchronously update an object.
  895. */
  896. static const struct fscache_state *fscache_update_object(struct fscache_object *object,
  897. int event)
  898. {
  899. _enter("{OBJ%x},%d", object->debug_id, event);
  900. fscache_update_aux_data(object);
  901. _leave("");
  902. return transit_to(WAIT_FOR_CMD);
  903. }
  904. /**
  905. * fscache_object_retrying_stale - Note retrying stale object
  906. * @object: The object that will be retried
  907. *
  908. * Note that an object lookup found an on-disk object that was adjudged to be
  909. * stale and has been deleted. The lookup will be retried.
  910. */
  911. void fscache_object_retrying_stale(struct fscache_object *object)
  912. {
  913. fscache_stat(&fscache_n_cache_no_space_reject);
  914. }
  915. EXPORT_SYMBOL(fscache_object_retrying_stale);
/**
 * fscache_object_mark_killed - Note that an object was killed
 * @object: The object that was culled
 * @why: The reason the object was killed.
 *
 * Note that an object was killed and account it under the appropriate
 * statistic.  If the object had already been marked killed by the cache, an
 * error is logged and no statistic is altered.
 */
void fscache_object_mark_killed(struct fscache_object *object,
				enum fscache_why_object_killed why)
{
	/* test_and_set_bit() returns the old bit value: non-zero means some
	 * earlier caller already marked this object killed.
	 */
	if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
		pr_err("Error: Object already killed by cache [%s]\n",
		       object->cache->identifier);
		return;
	}

	switch (why) {
	case FSCACHE_OBJECT_NO_SPACE:
		fscache_stat(&fscache_n_cache_no_space_reject);
		break;
	case FSCACHE_OBJECT_IS_STALE:
		fscache_stat(&fscache_n_cache_stale_objects);
		break;
	case FSCACHE_OBJECT_WAS_RETIRED:
		fscache_stat(&fscache_n_cache_retired_objects);
		break;
	case FSCACHE_OBJECT_WAS_CULLED:
		fscache_stat(&fscache_n_cache_culled_objects);
		break;
	}
}
EXPORT_SYMBOL(fscache_object_mark_killed);
  948. /*
  949. * The object is dead. We can get here if an object gets queued by an event
  950. * that would lead to its death (such as EV_KILL) when the dispatcher is
  951. * already running (and so can be requeued) but hasn't yet cleared the event
  952. * mask.
  953. */
  954. static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
  955. int event)
  956. {
  957. if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
  958. &object->flags))
  959. return NO_TRANSIT;
  960. WARN(true, "FS-Cache object redispatched after death");
  961. return NO_TRANSIT;
  962. }