object.c

/* FS-Cache object state machine handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/object.txt for a description of the
 * object state machine and the in-kernel representations.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include "internal.h"

static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);

#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))

/*
 * Define a work state.  Work states are execution states.  No event processing
 * is performed by them.  The function attached to a work state returns a
 * pointer indicating the next state to which the state machine should
 * transition.  Returning NO_TRANSIT repeats the current state, but goes back
 * to the scheduler first.
 */
#define WORK_STATE(n, sn, f) \
	const struct fscache_state __STATE_NAME(n) = { \
		.name = #n, \
		.short_name = sn, \
		.work = f \
	}

/*
 * Returns from work states.
 */
#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })

#define NO_TRANSIT ((struct fscache_state *)NULL)

/*
 * Define a wait state.  Wait states are event processing states.  No execution
 * is performed by them.  Wait states are just tables of "if event X occurs,
 * clear it and transition to state Y".  The dispatcher returns to the
 * scheduler if none of the events in which the wait state has an interest are
 * currently pending.
 */
#define WAIT_STATE(n, sn, ...) \
	const struct fscache_state __STATE_NAME(n) = { \
		.name = #n, \
		.short_name = sn, \
		.work = NULL, \
		.transitions = { __VA_ARGS__, { 0, NULL } } \
	}

#define TRANSIT_TO(state, emask) \
	{ .events = (emask), .transit_to = STATE(state) }

/*
 * The object state machine.
 */
static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object);
static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);
static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);
static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);
static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object);
static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);
static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);
static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object);
static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object);
static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
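/* OBJECT_DEAD is terminal: its "work function" is a non-NULL sentinel value
 * (2UL) rather than real code; the dispatcher checks for this value and
 * returns immediately instead of calling through the pointer.
 */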
static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL);

static WAIT_STATE(WAIT_FOR_INIT, "?INI",
		  TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_PARENT, "?PRN",
		  TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY));

static WAIT_STATE(WAIT_FOR_CMD, "?CMD",
		  TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
		  TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE),
		  TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR",
		  TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED));

/*
 * Out-of-band event transition tables.  These are for handling unexpected
 * events, such as an I/O error.  If an OOB event occurs, the state machine
 * clears and disables the event and forces a transition to the nominated work
 * state (a currently executing work state will complete first).
 *
 * In such a situation, object->state remembers the state the machine should
 * have been in or gone to, and returning NO_TRANSIT resumes from there.
 */
static const struct fscache_transition fscache_osm_init_oob[] = {
	TRANSIT_TO(ABORT_INIT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

static const struct fscache_transition fscache_osm_lookup_oob[] = {
	TRANSIT_TO(LOOKUP_FAILURE,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

static const struct fscache_transition fscache_osm_run_oob[] = {
	TRANSIT_TO(KILL_OBJECT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

static int fscache_get_object(struct fscache_object *);
static void fscache_put_object(struct fscache_object *);
static bool fscache_enqueue_dependents(struct fscache_object *, int);
static void fscache_dequeue_object(struct fscache_object *);

/*
 * we need to notify the parent when an op completes that we had outstanding
 * upon it
 */
static inline void fscache_done_parent_op(struct fscache_object *object)
{
	struct fscache_object *parent = object->parent;

	_enter("OBJ%x {OBJ%x,%x}",
	       object->debug_id, parent->debug_id, parent->n_ops);
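	/* At least one caller (fscache_object_available()) holds the child's
	 * lock here, so take the parent's lock with a nested annotation to
	 * keep lockdep happy.
	 */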
	spin_lock_nested(&parent->lock, 1);
	parent->n_obj_ops--;
	parent->n_ops--;
	if (parent->n_ops == 0)
		fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
	spin_unlock(&parent->lock);
}

/*
 * Object state machine dispatcher.
 */
static void fscache_object_sm_dispatcher(struct fscache_object *object)
{
	const struct fscache_transition *t;
	const struct fscache_state *state, *new_state;
	unsigned long events, event_mask;
	int event = -1;

	ASSERT(object != NULL);

	_enter("{OBJ%x,%s,%lx}",
	       object->debug_id, object->state->name, object->events);

	event_mask = object->event_mask;
restart:
	object->event_mask = 0; /* Mask normal event handling */
	state = object->state;
restart_masked:
	events = object->events;

	/* Handle any out-of-band events (typically an error) */
	if (events & object->oob_event_mask) {
		_debug("{OBJ%x} oob %lx",
		       object->debug_id, events & object->oob_event_mask);
		for (t = object->oob_table; t->events; t++) {
			if (events & t->events) {
				state = t->transit_to;
				ASSERT(state->work != NULL);
				event = fls(events & t->events) - 1;
				__clear_bit(event, &object->oob_event_mask);
				clear_bit(event, &object->events);
				goto execute_work_state;
			}
		}
	}

	/* Wait states are just transition tables */
	if (!state->work) {
		if (events & event_mask) {
			for (t = state->transitions; t->events; t++) {
				if (events & t->events) {
					new_state = t->transit_to;
					event = fls(events & t->events) - 1;
					clear_bit(event, &object->events);
					_debug("{OBJ%x} ev %d: %s -> %s",
					       object->debug_id, event,
					       state->name, new_state->name);
					object->state = state = new_state;
					goto execute_work_state;
				}
			}

			/* The event mask didn't include all the tabled bits */
			BUG();
		}
		/* Randomly woke up */
		goto unmask_events;
	}

execute_work_state:
	_debug("{OBJ%x} exec %s", object->debug_id, state->name);

	new_state = state->work(object, event);
	event = -1;
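	/* No transition: requeue the object so the same work state runs again
	 * later, and in the meantime listen only for out-of-band events.
	 */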
	if (new_state == NO_TRANSIT) {
		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
		fscache_enqueue_object(object);
		event_mask = object->oob_event_mask;
		goto unmask_events;
	}

	_debug("{OBJ%x} %s -> %s",
	       object->debug_id, state->name, new_state->name);
	object->state = state = new_state;

	if (state->work) {
		if (unlikely(state->work == ((void *)2UL))) {
			_leave(" [dead]");
			return;
		}
		goto restart_masked;
	}

	/* Transited to wait state */
	event_mask = object->oob_event_mask;
	for (t = state->transitions; t->events; t++)
		event_mask |= t->events;

unmask_events:
	object->event_mask = event_mask;
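	/* Make the updated event mask visible before re-reading the event bits
	 * so that an event raised while the mask was cleared is not missed.
	 */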
	smp_mb();
	events = object->events;
	if (events & event_mask)
		goto restart;

	_leave(" [msk %lx]", event_mask);
}

/*
 * execute an object
 */
static void fscache_object_work_func(struct work_struct *work)
{
	struct fscache_object *object =
		container_of(work, struct fscache_object, work);
	unsigned long start;

	_enter("{OBJ%x}", object->debug_id);

	start = jiffies;
	fscache_object_sm_dispatcher(object);
	fscache_hist(fscache_objs_histogram, start);
	fscache_put_object(object);
}

/**
 * fscache_object_init - Initialise a cache object description
 * @object: Object description
 * @cookie: Cookie object will be attached to
 * @cache: Cache in which backing object will be found
 *
 * Initialise a cache object description to its basic values.
 *
 * See Documentation/filesystems/caching/backend-api.txt for a complete
 * description.
 */
void fscache_object_init(struct fscache_object *object,
			 struct fscache_cookie *cookie,
			 struct fscache_cache *cache)
{
	const struct fscache_transition *t;

	atomic_inc(&cache->object_count);

	object->state = STATE(WAIT_FOR_INIT);
	object->oob_table = fscache_osm_init_oob;
	object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
	spin_lock_init(&object->lock);
	INIT_LIST_HEAD(&object->cache_link);
	INIT_HLIST_NODE(&object->cookie_link);
	INIT_WORK(&object->work, fscache_object_work_func);
	INIT_LIST_HEAD(&object->dependents);
	INIT_LIST_HEAD(&object->dep_link);
	INIT_LIST_HEAD(&object->pending_ops);
	object->n_children = 0;
	object->n_ops = object->n_in_progress = object->n_exclusive = 0;
	object->events = 0;
	object->store_limit = 0;
	object->store_limit_l = 0;
	object->cache = cache;
	object->cookie = cookie;
	object->parent = NULL;
#ifdef CONFIG_FSCACHE_OBJECT_LIST
	RB_CLEAR_NODE(&object->objlist_link);
#endif

	object->oob_event_mask = 0;
	for (t = object->oob_table; t->events; t++)
		object->oob_event_mask |= t->events;
	object->event_mask = object->oob_event_mask;
	for (t = object->state->transitions; t->events; t++)
		object->event_mask |= t->events;
}
EXPORT_SYMBOL(fscache_object_init);

/*
 * Mark the object as no longer being live, making sure that we synchronise
 * against op submission.
 */
static inline void fscache_mark_object_dead(struct fscache_object *object)
{
	spin_lock(&object->lock);
	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
	spin_unlock(&object->lock);
}

/*
 * Abort object initialisation before we start it.
 */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_event_mask = 0;
	fscache_dequeue_object(object);
	return transit_to(KILL_OBJECT);
}

/*
 * initialise an object
 * - check the specified object's parent to see if we can make use of it
 *   immediately to do a creation
 * - we may need to start the process of creating a parent and we need to wait
 *   for the parent's lookup and creation to complete if it's not there yet
 */
static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
							      int event)
{
	struct fscache_object *parent;
	bool success;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(list_empty(&object->dep_link));

	parent = object->parent;
	if (!parent) {
		_leave(" [no parent]");
		return transit_to(DROP_OBJECT);
	}

	_debug("parent: %s of:%lx", parent->state->name, parent->flags);

	if (fscache_object_is_dying(parent)) {
		_leave(" [bad parent]");
		return transit_to(DROP_OBJECT);
	}

	if (fscache_object_is_available(parent)) {
		_leave(" [ready]");
		return transit_to(PARENT_READY);
	}

	_debug("wait");
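	/* Take a ref on this object and park it on the parent's dependents
	 * list; the parent raises PARENT_READY (or KILL) on each dependent
	 * once its own lookup completes or it dies.
	 */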
	spin_lock(&parent->lock);
	fscache_stat(&fscache_n_cop_grab_object);
	success = false;
	if (fscache_object_is_live(parent) &&
	    object->cache->ops->grab_object(object)) {
		list_add(&object->dep_link, &parent->dependents);
		success = true;
	}
	fscache_stat_d(&fscache_n_cop_grab_object);
	spin_unlock(&parent->lock);
	if (!success) {
		_leave(" [grab failed]");
		return transit_to(DROP_OBJECT);
	}

	/* fscache_acquire_non_index_cookie() uses this
	 * to wake the chain up */
	fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
	_leave(" [wait]");
	return transit_to(WAIT_FOR_PARENT);
}

/*
 * Once the parent object is ready, we should kick off our lookup op.
 */
static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
							int event)
{
	struct fscache_object *parent = object->parent;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(parent != NULL);

	spin_lock(&parent->lock);
	parent->n_ops++;
	parent->n_obj_ops++;
	object->lookup_jif = jiffies;
	spin_unlock(&parent->lock);

	_leave("");
	return transit_to(LOOK_UP_OBJECT);
}

/*
 * look an object up in the cache from which it was allocated
 * - we hold an "access lock" on the parent object, so the parent object cannot
 *   be withdrawn by either party till we've finished
 */
static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
							   int event)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_object *parent = object->parent;
	int ret;

	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_table = fscache_osm_lookup_oob;

	ASSERT(parent != NULL);
	ASSERTCMP(parent->n_ops, >, 0);
	ASSERTCMP(parent->n_obj_ops, >, 0);

	/* make sure the parent is still available */
	ASSERT(fscache_object_is_available(parent));

	if (fscache_object_is_dying(parent) ||
	    test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
	    !fscache_use_cookie(object)) {
		_leave(" [unavailable]");
		return transit_to(LOOKUP_FAILURE);
	}

	_debug("LOOKUP \"%s\" in \"%s\"",
	       cookie->def->name, object->cache->tag->name);

	fscache_stat(&fscache_n_object_lookups);
	fscache_stat(&fscache_n_cop_lookup_object);
	ret = object->cache->ops->lookup_object(object);
	fscache_stat_d(&fscache_n_cop_lookup_object);

	fscache_unuse_cookie(object);

	if (ret == -ETIMEDOUT) {
		/* probably stuck behind another object, so move this one to
		 * the back of the queue */
		fscache_stat(&fscache_n_object_lookups_timed_out);
		_leave(" [timeout]");
		return NO_TRANSIT;
	}

	if (ret < 0) {
		_leave(" [error]");
		return transit_to(LOOKUP_FAILURE);
	}

	_leave(" [ok]");
	return transit_to(OBJECT_AVAILABLE);
}

/**
 * fscache_object_lookup_negative - Note negative cookie lookup
 * @object: Object pointing to cookie to mark
 *
 * Note negative lookup, permitting those waiting to read data from an already
 * existing backing object to continue as there's no data for them to read.
 */
void fscache_object_lookup_negative(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_negative);

		/* Allow write requests to begin stacking up and read requests
		 * to begin returning ENODATA.
		 */
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		_debug("wake up lookup %p", &cookie->flags);
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	}
	_leave("");
}
EXPORT_SYMBOL(fscache_object_lookup_negative);

/**
 * fscache_obtained_object - Note successful object lookup or creation
 * @object: Object pointing to cookie to mark
 *
 * Note successful lookup and/or creation, permitting those waiting to write
 * data to a backing object to continue.
 *
 * Note that after calling this, an object's cookie may be relinquished by the
 * netfs, and so must be accessed with object lock held.
 */
void fscache_obtained_object(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	/* if we were still looking up, then we must have a positive lookup
	 * result, in which case there may be data available */
	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_positive);

		/* We do (presumably) have data */
		clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		/* Allow write requests to begin stacking up and read requests
		 * to begin shovelling data.
		 */
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	} else {
		fscache_stat(&fscache_n_object_created);
	}

	set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
	_leave("");
}
EXPORT_SYMBOL(fscache_obtained_object);

/*
 * handle an object that has just become available
 */
static const struct fscache_state *fscache_object_available(struct fscache_object *object,
							     int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_table = fscache_osm_run_oob;

	spin_lock(&object->lock);

	fscache_done_parent_op(object);
	if (object->n_in_progress == 0) {
		if (object->n_ops > 0) {
			ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
			fscache_start_operations(object);
		} else {
			ASSERT(list_empty(&object->pending_ops));
		}
	}
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
	fscache_stat(&fscache_n_object_avail);

	_leave("");
	return transit_to(JUMPSTART_DEPS);
}

/*
 * Wake up this object's dependent objects now that we've become available.
 */
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
		return NO_TRANSIT; /* Not finished; requeue */
	return transit_to(WAIT_FOR_CMD);
}

/*
 * Handle lookup or creation failure.
 */
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
							   int event)
{
	struct fscache_cookie *cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_event_mask = 0;

	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);

	cookie = object->cookie;
	set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

	fscache_done_parent_op(object);
	return transit_to(KILL_OBJECT);
}

/*
 * Wait for completion of all active operations on this object and the death of
 * all child objects of this object.
 */
static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
						       int event)
{
	_enter("{OBJ%x,%d,%d},%d",
	       object->debug_id, object->n_ops, object->n_children, event);

	fscache_mark_object_dead(object);
	object->oob_event_mask = 0;

	if (list_empty(&object->dependents) &&
	    object->n_ops == 0 &&
	    object->n_children == 0)
		return transit_to(DROP_OBJECT);
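	/* If nothing is actually running, start any queued operations so that
	 * the object's outstanding op count can drain.
	 */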
	if (object->n_in_progress == 0) {
		spin_lock(&object->lock);
		if (object->n_ops > 0 && object->n_in_progress == 0)
			fscache_start_operations(object);
		spin_unlock(&object->lock);
	}

	if (!list_empty(&object->dependents))
		return transit_to(KILL_DEPENDENTS);

	return transit_to(WAIT_FOR_CLEARANCE);
}

/*
 * Kill dependent objects.
 */
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
							    int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
		return NO_TRANSIT; /* Not finished */
	return transit_to(WAIT_FOR_CLEARANCE);
}

/*
 * Drop an object's attachments
 */
static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
						       int event)
{
	struct fscache_object *parent = object->parent;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_cache *cache = object->cache;
	bool awaken = false;

	_enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);

	ASSERT(cookie != NULL);
	ASSERT(!hlist_unhashed(&object->cookie_link));

	/* Make sure the cookie no longer points here and that the netfs isn't
	 * waiting for us.
	 */
	spin_lock(&cookie->lock);
	hlist_del_init(&object->cookie_link);
	if (hlist_empty(&cookie->backing_objects) &&
	    test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		awaken = true;
	spin_unlock(&cookie->lock);

	if (awaken)
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);

	/* Prevent a race with our last child, which has to signal EV_CLEARED
	 * before dropping our spinlock.
	 */
	spin_lock(&object->lock);
	spin_unlock(&object->lock);

	/* Discard from the cache's collection of objects */
	spin_lock(&cache->object_list_lock);
	list_del_init(&object->cache_link);
	spin_unlock(&cache->object_list_lock);

	fscache_stat(&fscache_n_cop_drop_object);
	cache->ops->drop_object(object);
	fscache_stat_d(&fscache_n_cop_drop_object);

	/* The parent object wants to know when all its dependents have gone */
	if (parent) {
		_debug("release parent OBJ%x {%d}",
		       parent->debug_id, parent->n_children);

		spin_lock(&parent->lock);
		parent->n_children--;
		if (parent->n_children == 0)
			fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
		spin_unlock(&parent->lock);
		object->parent = NULL;
	}

	/* this just shifts the object release to the work processor */
	fscache_put_object(object);
	fscache_stat(&fscache_n_object_dead);

	_leave("");
	return transit_to(OBJECT_DEAD);
}

/*
 * get a ref on an object
 */
static int fscache_get_object(struct fscache_object *object)
{
	int ret;

	fscache_stat(&fscache_n_cop_grab_object);
	ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
	fscache_stat_d(&fscache_n_cop_grab_object);
	return ret;
}

/*
 * Discard a ref on an object
 */
static void fscache_put_object(struct fscache_object *object)
{
	fscache_stat(&fscache_n_cop_put_object);
	object->cache->ops->put_object(object);
	fscache_stat_d(&fscache_n_cop_put_object);
}

/**
 * fscache_object_destroy - Note that a cache object is about to be destroyed
 * @object: The object to be destroyed
 *
 * Note the imminent destruction and deallocation of a cache object record.
 */
void fscache_object_destroy(struct fscache_object *object)
{
	fscache_objlist_remove(object);

	/* We can get rid of the cookie now */
	fscache_cookie_put(object->cookie);
	object->cookie = NULL;
}
EXPORT_SYMBOL(fscache_object_destroy);

/*
 * enqueue an object for metadata-type processing
 */
void fscache_enqueue_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	if (fscache_get_object(object) >= 0) {
		wait_queue_head_t *cong_wq =
			&get_cpu_var(fscache_object_cong_wait);

		if (queue_work(fscache_object_wq, &object->work)) {
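			/* The work was queued; if the object workqueue is now
			 * congested, wake anyone sleeping in
			 * fscache_object_sleep_till_congested().
			 */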
			if (fscache_object_congested())
				wake_up(cong_wq);
		} else
			fscache_put_object(object);

		put_cpu_var(fscache_object_cong_wait);
	}
}

/**
 * fscache_object_sleep_till_congested - Sleep until object wq is congested
 * @timeoutp: Scheduler sleep timeout
 *
 * Allow an object handler to sleep until the object workqueue is congested.
 *
 * The caller must set up a wake up event before calling this and must have set
 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
 * condition before calling this function as no test is made here.
 *
 * %true is returned if the object wq is congested, %false otherwise.
 */
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
	wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
	DEFINE_WAIT(wait);

	if (fscache_object_congested())
		return true;

	add_wait_queue_exclusive(cong_wq, &wait);
	if (!fscache_object_congested())
		*timeoutp = schedule_timeout(*timeoutp);
	finish_wait(cong_wq, &wait);

	return fscache_object_congested();
}
EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);

/*
 * Enqueue the dependents of an object for metadata-type processing.
 *
 * If we don't manage to finish the list before the scheduler wants to run
 * again then return false immediately.  We return true if the list was
 * cleared.
 */
static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
{
	struct fscache_object *dep;
	bool ret = true;

	_enter("{OBJ%x}", object->debug_id);

	if (list_empty(&object->dependents))
		return true;

	spin_lock(&object->lock);

	while (!list_empty(&object->dependents)) {
		dep = list_entry(object->dependents.next,
				 struct fscache_object, dep_link);
		list_del_init(&dep->dep_link);

		fscache_raise_event(dep, event);
		fscache_put_object(dep);

		if (!list_empty(&object->dependents) && need_resched()) {
			ret = false;
			break;
		}
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * remove an object from whatever queue it's waiting on
 */
static void fscache_dequeue_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	if (!list_empty(&object->dep_link)) {
		spin_lock(&object->parent->lock);
		list_del_init(&object->dep_link);
		spin_unlock(&object->parent->lock);
	}

	_leave("");
}

/**
 * fscache_check_aux - Ask the netfs whether an object on disk is still valid
 * @object: The object to ask about
 * @data: The auxiliary data for the object
 * @datalen: The size of the auxiliary data
 *
 * This function consults the netfs about the coherency state of an object.
 * The caller must be holding a ref on cookie->n_active (held by
 * fscache_look_up_object() on behalf of the cache backend during object lookup
 * and creation).
 */
enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
					const void *data, uint16_t datalen)
{
	enum fscache_checkaux result;

	if (!object->cookie->def->check_aux) {
		fscache_stat(&fscache_n_checkaux_none);
		return FSCACHE_CHECKAUX_OKAY;
	}

	result = object->cookie->def->check_aux(object->cookie->netfs_data,
						data, datalen);
	switch (result) {
	/* entry okay as is */
	case FSCACHE_CHECKAUX_OKAY:
		fscache_stat(&fscache_n_checkaux_okay);
		break;

	/* entry requires update */
	case FSCACHE_CHECKAUX_NEEDS_UPDATE:
		fscache_stat(&fscache_n_checkaux_update);
		break;

	/* entry requires deletion */
	case FSCACHE_CHECKAUX_OBSOLETE:
		fscache_stat(&fscache_n_checkaux_obsolete);
		break;

	default:
		BUG();
	}

	return result;
}
EXPORT_SYMBOL(fscache_check_aux);

/*
 * Asynchronously invalidate an object.
 */
static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
							       int event)
{
	struct fscache_operation *op;
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* We're going to need the cookie.  If the cookie is not available then
	 * retire the object instead.
	 */
	if (!fscache_use_cookie(object)) {
		ASSERT(object->cookie->stores.rnode == NULL);
		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
		_leave(" [no cookie]");
		return transit_to(KILL_OBJECT);
	}

	/* Reject any new read/write ops and abort any that are pending. */
	fscache_invalidate_writes(cookie);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	fscache_cancel_all_ops(object);

	/* Now we have to wait for in-progress reads and writes */
	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		goto nomem;

	fscache_operation_init(op, object->cache->ops->invalidate_object,
			       NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto submit_op_failed;
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);

	/* Once we've completed the invalidation, we know there will be no data
	 * stored in the cache and thus we can reinstate the data-check-skip
	 * optimisation.
	 */
	set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* We can allow read and write requests to come in once again.  They'll
	 * queue up behind our exclusive invalidation operation.
	 */
	if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	_leave(" [ok]");
	return transit_to(UPDATE_OBJECT);

nomem:
	fscache_mark_object_dead(object);
	fscache_unuse_cookie(object);
	_leave(" [ENOMEM]");
	return transit_to(KILL_OBJECT);

submit_op_failed:
	fscache_mark_object_dead(object);
	spin_unlock(&cookie->lock);
	fscache_unuse_cookie(object);
	kfree(op);
	_leave(" [EIO]");
	return transit_to(KILL_OBJECT);
}

static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
							      int event)
{
	const struct fscache_state *s;

	fscache_stat(&fscache_n_invalidates_run);
	fscache_stat(&fscache_n_cop_invalidate_object);
	s = _fscache_invalidate_object(object, event);
	fscache_stat_d(&fscache_n_cop_invalidate_object);
	return s;
}

/*
 * Asynchronously update an object.
 */
static const struct fscache_state *fscache_update_object(struct fscache_object *object,
							  int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	fscache_stat(&fscache_n_updates_run);
	fscache_stat(&fscache_n_cop_update_object);
	object->cache->ops->update_object(object);
	fscache_stat_d(&fscache_n_cop_update_object);
	_leave("");
	return transit_to(WAIT_FOR_CMD);
}

/**
 * fscache_object_retrying_stale - Note retrying stale object
 * @object: The object that will be retried
 *
 * Note that an object lookup found an on-disk object that was adjudged to be
 * stale and has been deleted.  The lookup will be retried.
 */
void fscache_object_retrying_stale(struct fscache_object *object)
{
	fscache_stat(&fscache_n_cache_no_space_reject);
}
EXPORT_SYMBOL(fscache_object_retrying_stale);

/**
 * fscache_object_mark_killed - Note that an object was killed
 * @object: The object that was culled
 * @why: The reason the object was killed.
 *
 * Note that an object was killed and record the reason in the cache's
 * statistics.  If the object has already been marked as killed by the cache,
 * an error is logged instead.
 */
void fscache_object_mark_killed(struct fscache_object *object,
				enum fscache_why_object_killed why)
{
	if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
		pr_err("Error: Object already killed by cache [%s]\n",
		       object->cache->identifier);
		return;
	}

	switch (why) {
	case FSCACHE_OBJECT_NO_SPACE:
		fscache_stat(&fscache_n_cache_no_space_reject);
		break;
	case FSCACHE_OBJECT_IS_STALE:
		fscache_stat(&fscache_n_cache_stale_objects);
		break;
	case FSCACHE_OBJECT_WAS_RETIRED:
		fscache_stat(&fscache_n_cache_retired_objects);
		break;
	case FSCACHE_OBJECT_WAS_CULLED:
		fscache_stat(&fscache_n_cache_culled_objects);
		break;
	}
}
EXPORT_SYMBOL(fscache_object_mark_killed);