nfs4session.c

/*
 * fs/nfs/nfs4session.c
 *
 * Copyright (c) 2012 Trond Myklebust <Trond.Myklebust@netapp.com>
 *
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/module.h>
#include "nfs4_fs.h"
#include "internal.h"
#include "nfs4session.h"
#include "callback.h"

#define NFSDBG_FACILITY         NFSDBG_STATE

static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
{
        tbl->highest_used_slotid = NFS4_NO_SLOT;
        spin_lock_init(&tbl->slot_tbl_lock);
        rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
        init_waitqueue_head(&tbl->slot_waitq);
        init_completion(&tbl->complete);
}

/*
 * nfs4_shrink_slot_table - free retired slots from the slot table
 */
static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize)
{
        struct nfs4_slot **p;

        if (newsize >= tbl->max_slots)
                return;

        p = &tbl->slots;
        while (newsize--)
                p = &(*p)->next;
        while (*p) {
                struct nfs4_slot *slot = *p;

                *p = slot->next;
                kfree(slot);
                tbl->max_slots--;
        }
}

/**
 * nfs4_slot_tbl_drain_complete - wake waiters when drain is complete
 * @tbl: controlling slot table
 *
 */
void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
{
        if (nfs4_slot_tbl_draining(tbl))
                complete(&tbl->complete);
}

/*
 * nfs4_free_slot - free a slot and efficiently update slot table.
 *
 * Freeing a slot is trivially done by clearing its respective bit
 * in the bitmap.
 * If the freed slotid equals highest_used_slotid we want to update it
 * so that the server would be able to size down the slot table if needed,
 * otherwise we know that the highest_used_slotid is still in use.
 * When updating highest_used_slotid there may be "holes" in the bitmap
 * so we need to scan down from highest_used_slotid to 0 looking for the now
 * highest slotid in use.
 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
 *
 * Must be called while holding tbl->slot_tbl_lock
 */
void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
{
        u32 slotid = slot->slot_nr;

        /* clear used bit in bitmap */
        __clear_bit(slotid, tbl->used_slots);

        /* update highest_used_slotid when it is freed */
        if (slotid == tbl->highest_used_slotid) {
                u32 new_max = find_last_bit(tbl->used_slots, slotid);
                if (new_max < slotid)
                        tbl->highest_used_slotid = new_max;
                else {
                        tbl->highest_used_slotid = NFS4_NO_SLOT;
                        nfs4_slot_tbl_drain_complete(tbl);
                }
        }
        dprintk("%s: slotid %u highest_used_slotid %u\n", __func__,
                slotid, tbl->highest_used_slotid);
}

static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl,
                u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
        struct nfs4_slot *slot;

        slot = kzalloc(sizeof(*slot), gfp_mask);
        if (slot) {
                slot->table = tbl;
                slot->slot_nr = slotid;
                slot->seq_nr = seq_init;
        }
        return slot;
}
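
/*
 * Walk the table's singly linked list of slots, which is kept ordered by
 * slot number, appending freshly allocated slots at the tail until the
 * slot numbered @slotid exists.  Returns that slot on success, or
 * ERR_PTR(-ENOMEM) if an allocation fails first.
 */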
static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl,
                u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
        struct nfs4_slot **p, *slot;

        p = &tbl->slots;
        for (;;) {
                if (*p == NULL) {
                        *p = nfs4_new_slot(tbl, tbl->max_slots,
                                        seq_init, gfp_mask);
                        if (*p == NULL)
                                break;
                        tbl->max_slots++;
                }
                slot = *p;
                if (slot->slot_nr == slotid)
                        return slot;
                p = &slot->next;
        }
        return ERR_PTR(-ENOMEM);
}
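
/*
 * Mark @slot as in use: set its bit in the used_slots bitmap, raise
 * highest_used_slotid if necessary, and stamp the slot with the current
 * table generation.  Caller must hold tbl->slot_tbl_lock.
 */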
static void nfs4_lock_slot(struct nfs4_slot_table *tbl,
                struct nfs4_slot *slot)
{
        u32 slotid = slot->slot_nr;

        __set_bit(slotid, tbl->used_slots);
        if (slotid > tbl->highest_used_slotid ||
            tbl->highest_used_slotid == NFS4_NO_SLOT)
                tbl->highest_used_slotid = slotid;
        slot->generation = tbl->generation;
}

/*
 * nfs4_try_to_lock_slot - Given a slot try to allocate it
 *
 * Note: must be called with the slot_tbl_lock held.
 */
bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
{
        if (nfs4_test_locked_slot(tbl, slot->slot_nr))
                return false;
        nfs4_lock_slot(tbl, slot);
        return true;
}

/*
 * nfs4_lookup_slot - Find a slot but don't allocate it
 *
 * Note: must be called with the slot_tbl_lock held.
 */
struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
        if (slotid <= tbl->max_slotid)
                return nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
        return ERR_PTR(-E2BIG);
}
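
/*
 * Look up the slot numbered @slotid and report its current sequence
 * number through @seq_nr.  Returns 0 on success, or the error from
 * nfs4_lookup_slot() converted to a negative errno.
 */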
static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
                u32 *seq_nr)
        __must_hold(&tbl->slot_tbl_lock)
{
        struct nfs4_slot *slot;
        int ret;

        slot = nfs4_lookup_slot(tbl, slotid);
        ret = PTR_ERR_OR_ZERO(slot);
        if (!ret)
                *seq_nr = slot->seq_nr;
        return ret;
}

/*
 * nfs4_slot_seqid_in_use - test if a slot sequence id is still in use
 *
 * Given a slot table, slot id and sequence number, determine if the
 * RPC call in question is still in flight. This function is mainly
 * intended for use by the callback channel.
 */
static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
                u32 slotid, u32 seq_nr)
{
        u32 cur_seq = 0;
        bool ret = false;

        spin_lock(&tbl->slot_tbl_lock);
        if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 &&
            cur_seq == seq_nr && test_bit(slotid, tbl->used_slots))
                ret = true;
        spin_unlock(&tbl->slot_tbl_lock);
        return ret;
}

/*
 * nfs4_slot_wait_on_seqid - wait until a slot sequence id is complete
 *
 * Given a slot table, slot id and sequence number, wait until the
 * corresponding RPC call completes. This function is mainly
 * intended for use by the callback channel.
 */
int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
                u32 slotid, u32 seq_nr,
                unsigned long timeout)
{
        if (wait_event_timeout(tbl->slot_waitq,
                        !nfs4_slot_seqid_in_use(tbl, slotid, seq_nr),
                        timeout) == 0)
                return -ETIMEDOUT;
        return 0;
}

/*
 * nfs4_alloc_slot - efficiently look for a free slot
 *
 * nfs4_alloc_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and respectively set up the sequence operation args.
 *
 * Note: must be called under the slot_tbl_lock.
 */
struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl)
{
        struct nfs4_slot *ret = ERR_PTR(-EBUSY);
        u32 slotid;

        dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
                __func__, tbl->used_slots[0], tbl->highest_used_slotid,
                tbl->max_slotid + 1);
        slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1);
        if (slotid <= tbl->max_slotid) {
                ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
                if (!IS_ERR(ret))
                        nfs4_lock_slot(tbl, ret);
        }
        dprintk("<-- %s used_slots=%04lx highest_used=%u slotid=%u\n",
                __func__, tbl->used_slots[0], tbl->highest_used_slotid,
                !IS_ERR(ret) ? ret->slot_nr : NFS4_NO_SLOT);
        return ret;
}
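
/*
 * Grow the slot table so that it holds at least @max_reqs slots.
 * Creating the slot with the highest required slot number implicitly
 * allocates every lower-numbered slot as well, each with an initial
 * sequence number of @ivalue.
 */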
static int nfs4_grow_slot_table(struct nfs4_slot_table *tbl,
                u32 max_reqs, u32 ivalue)
{
        if (max_reqs <= tbl->max_slots)
                return 0;
        if (!IS_ERR(nfs4_find_or_create_slot(tbl, max_reqs - 1, ivalue, GFP_NOFS)))
                return 0;
        return -ENOMEM;
}
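
/*
 * Trim the table to the size advertised by the server and restart every
 * remaining slot at sequence number @ivalue, clearing the interrupted
 * flag and resetting the target/server slotid bookkeeping.  Called with
 * tbl->slot_tbl_lock held.
 */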
static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl,
                u32 server_highest_slotid,
                u32 ivalue)
{
        struct nfs4_slot **p;

        nfs4_shrink_slot_table(tbl, server_highest_slotid + 1);
        p = &tbl->slots;
        while (*p) {
                (*p)->seq_nr = ivalue;
                (*p)->interrupted = 0;
                p = &(*p)->next;
        }
        tbl->highest_used_slotid = NFS4_NO_SLOT;
        tbl->target_highest_slotid = server_highest_slotid;
        tbl->server_highest_slotid = server_highest_slotid;
        tbl->d_target_highest_slotid = 0;
        tbl->d2_target_highest_slotid = 0;
        tbl->max_slotid = server_highest_slotid;
}

/*
 * (re)Initialise a slot table
 */
static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl,
                u32 max_reqs, u32 ivalue)
{
        int ret;

        dprintk("--> %s: max_reqs=%u, tbl->max_slots %u\n", __func__,
                max_reqs, tbl->max_slots);

        if (max_reqs > NFS4_MAX_SLOT_TABLE)
                max_reqs = NFS4_MAX_SLOT_TABLE;

        ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue);
        if (ret)
                goto out;

        spin_lock(&tbl->slot_tbl_lock);
        nfs4_reset_slot_table(tbl, max_reqs - 1, ivalue);
        spin_unlock(&tbl->slot_tbl_lock);

        dprintk("%s: tbl=%p slots=%p max_slots=%u\n", __func__,
                tbl, tbl->slots, tbl->max_slots);
out:
        dprintk("<-- %s: return %d\n", __func__, ret);
        return ret;
}

/*
 * nfs4_release_slot_table - release all slot table entries
 */
static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
{
        nfs4_shrink_slot_table(tbl, 0);
}

/**
 * nfs4_shutdown_slot_table - release resources attached to a slot table
 * @tbl: slot table to shut down
 *
 */
void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
{
        nfs4_release_slot_table(tbl);
        rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
}

/**
 * nfs4_setup_slot_table - prepare a stand-alone slot table for use
 * @tbl: slot table to set up
 * @max_reqs: maximum number of requests allowed
 * @queue: name to give RPC wait queue
 *
 * Returns zero on success, or a negative errno.
 */
int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, unsigned int max_reqs,
                const char *queue)
{
        nfs4_init_slot_table(tbl, queue);
        return nfs4_realloc_slot_table(tbl, max_reqs, 0);
}
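
/*
 * rpc_wake_up_first() callback: hand the slot in @pslot to a task queued
 * on the slot table wait queue.  The hand-off is refused while the table
 * is draining, unless the waiting request is privileged.
 */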
static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
{
        struct nfs4_sequence_args *args = task->tk_msg.rpc_argp;
        struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
        struct nfs4_slot *slot = pslot;
        struct nfs4_slot_table *tbl = slot->table;

        if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
                return false;
        slot->generation = tbl->generation;
        args->sa_slot = slot;
        res->sr_timestamp = jiffies;
        res->sr_slot = slot;
        res->sr_status_flags = 0;
        res->sr_status = 1;
        return true;
}

static bool __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
                struct nfs4_slot *slot)
{
        if (rpc_wake_up_first(&tbl->slot_tbl_waitq, nfs41_assign_slot, slot))
                return true;
        return false;
}

bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
                struct nfs4_slot *slot)
{
        if (slot->slot_nr > tbl->max_slotid)
                return false;
        return __nfs41_wake_and_assign_slot(tbl, slot);
}
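
/*
 * Allocate the next free slot and try to hand it to a waiting task.
 * If no task accepts the slot, return it to the table and report false.
 */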
static bool nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl)
{
        struct nfs4_slot *slot = nfs4_alloc_slot(tbl);

        if (!IS_ERR(slot)) {
                bool ret = __nfs41_wake_and_assign_slot(tbl, slot);
                if (ret)
                        return ret;
                nfs4_free_slot(tbl, slot);
        }
        return false;
}
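
/*
 * Keep waking queued tasks, one per available slot, until either the
 * wait queue or the free slots are exhausted.
 */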
void nfs41_wake_slot_table(struct nfs4_slot_table *tbl)
{
        for (;;) {
                if (!nfs41_try_wake_next_slot_table_entry(tbl))
                        break;
        }
}

#if defined(CONFIG_NFS_V4_1)
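
/*
 * Recompute max_slotid as the smallest of the requested target, the
 * server's highest slotid and the table's own target, capped at
 * NFS4_MAX_SLOT_TABLE - 1, then wake any tasks that can now get a slot.
 * Called with tbl->slot_tbl_lock held.
 */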
static void nfs41_set_max_slotid_locked(struct nfs4_slot_table *tbl,
                u32 target_highest_slotid)
{
        u32 max_slotid;

        max_slotid = min(NFS4_MAX_SLOT_TABLE - 1, target_highest_slotid);
        if (max_slotid > tbl->server_highest_slotid)
                max_slotid = tbl->server_highest_slotid;
        if (max_slotid > tbl->target_highest_slotid)
                max_slotid = tbl->target_highest_slotid;
        tbl->max_slotid = max_slotid;
        nfs41_wake_slot_table(tbl);
}

/* Update the client's idea of target_highest_slotid */
static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
                u32 target_highest_slotid)
{
        if (tbl->target_highest_slotid == target_highest_slotid)
                return;
        tbl->target_highest_slotid = target_highest_slotid;
        tbl->generation++;
}

void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
                u32 target_highest_slotid)
{
        spin_lock(&tbl->slot_tbl_lock);
        nfs41_set_target_slotid_locked(tbl, target_highest_slotid);
        tbl->d_target_highest_slotid = 0;
        tbl->d2_target_highest_slotid = 0;
        nfs41_set_max_slotid_locked(tbl, target_highest_slotid);
        spin_unlock(&tbl->slot_tbl_lock);
}
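
/*
 * Note the server's new highest slotid and release the now-excess slots,
 * unless a higher slotid (as tracked by highest_used_slotid) is still
 * marked in use.
 */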
static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl,
                u32 highest_slotid)
{
        if (tbl->server_highest_slotid == highest_slotid)
                return;
        if (tbl->highest_used_slotid > highest_slotid)
                return;
        /* Deallocate slots */
        nfs4_shrink_slot_table(tbl, highest_slotid + 1);
        tbl->server_highest_slotid = highest_slotid;
}
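
/*
 * Return roughly half of the signed difference s1 - s2, biased away from
 * zero.  Used below as a damped discrete derivative when tracking changes
 * in the server's target_highest_slotid.
 */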
static s32 nfs41_derivative_target_slotid(s32 s1, s32 s2)
{
        s1 -= s2;
        if (s1 == 0)
                return 0;
        if (s1 < 0)
                return (s1 - 1) >> 1;
        return (s1 + 1) >> 1;
}

static int nfs41_sign_s32(s32 s1)
{
        if (s1 > 0)
                return 1;
        if (s1 < 0)
                return -1;
        return 0;
}

static bool nfs41_same_sign_or_zero_s32(s32 s1, s32 s2)
{
        if (!s1 || !s2)
                return true;
        return nfs41_sign_s32(s1) == nfs41_sign_s32(s2);
}

/* Try to eliminate outliers by checking for sharp changes in the
 * derivatives and second derivatives
 */
static bool nfs41_is_outlier_target_slotid(struct nfs4_slot_table *tbl,
                u32 new_target)
{
        s32 d_target, d2_target;
        bool ret = true;

        d_target = nfs41_derivative_target_slotid(new_target,
                        tbl->target_highest_slotid);
        d2_target = nfs41_derivative_target_slotid(d_target,
                        tbl->d_target_highest_slotid);
        /* Is first derivative same sign? */
        if (nfs41_same_sign_or_zero_s32(d_target, tbl->d_target_highest_slotid))
                ret = false;
        /* Is second derivative same sign? */
        if (nfs41_same_sign_or_zero_s32(d2_target, tbl->d2_target_highest_slotid))
                ret = false;
        tbl->d_target_highest_slotid = d_target;
        tbl->d2_target_highest_slotid = d2_target;
        return ret;
}
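
/*
 * Process the slotid hints from a SEQUENCE reply: adopt the server's
 * target_highest_slotid unless it looks like an outlier, shrink the table
 * to sr_highest_slotid if the reply belongs to the current table
 * generation, and recompute max_slotid, waking any eligible waiters.
 */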
void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
                struct nfs4_slot *slot,
                struct nfs4_sequence_res *res)
{
        spin_lock(&tbl->slot_tbl_lock);
        if (!nfs41_is_outlier_target_slotid(tbl, res->sr_target_highest_slotid))
                nfs41_set_target_slotid_locked(tbl, res->sr_target_highest_slotid);
        if (tbl->generation == slot->generation)
                nfs41_set_server_slotid_locked(tbl, res->sr_highest_slotid);
        nfs41_set_max_slotid_locked(tbl, res->sr_target_highest_slotid);
        spin_unlock(&tbl->slot_tbl_lock);
}

static void nfs4_release_session_slot_tables(struct nfs4_session *session)
{
        nfs4_release_slot_table(&session->fc_slot_table);
        nfs4_release_slot_table(&session->bc_slot_table);
}

/*
 * Initialize or reset the forechannel and backchannel tables
 */
int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
        struct nfs4_slot_table *tbl;
        int status;

        dprintk("--> %s\n", __func__);
        /* Fore channel */
        tbl = &ses->fc_slot_table;
        tbl->session = ses;
        status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
        if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */
                return status;
        /* Back channel */
        tbl = &ses->bc_slot_table;
        tbl->session = ses;
        status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
        if (status && tbl->slots == NULL)
                /* Fore and back channel share a connection so get
                 * both slot tables or neither */
                nfs4_release_session_slot_tables(ses);
        return status;
}
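
/*
 * Allocate a session and initialise its fore- and backchannel slot
 * tables; the new session starts in the NFS4_SESSION_INITING state.
 */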
struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
        struct nfs4_session *session;

        session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
        if (!session)
                return NULL;

        nfs4_init_slot_table(&session->fc_slot_table, "ForeChannel Slot table");
        nfs4_init_slot_table(&session->bc_slot_table, "BackChannel Slot table");
        session->session_state = 1 << NFS4_SESSION_INITING;

        session->clp = clp;
        return session;
}

static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
{
        nfs4_shutdown_slot_table(&session->fc_slot_table);
        nfs4_shutdown_slot_table(&session->bc_slot_table);
}
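
/*
 * Tear the session down: send DESTROY_SESSION to the server, destroy the
 * transport's backchannel, shut down both slot tables and free the
 * session structure.
 */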
void nfs4_destroy_session(struct nfs4_session *session)
{
        struct rpc_xprt *xprt;
        struct rpc_cred *cred;

        cred = nfs4_get_clid_cred(session->clp);
        nfs4_proc_destroy_session(session, cred);
        if (cred)
                put_rpccred(cred);

        rcu_read_lock();
        xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
        rcu_read_unlock();
        dprintk("%s Destroy backchannel for xprt %p\n",
                __func__, xprt);
        xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
        nfs4_destroy_session_slot_tables(session);
        kfree(session);
}

/*
 * With sessions, the client is not marked ready until after a
 * successful EXCHANGE_ID and CREATE_SESSION.
 *
 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate
 * other versions of NFS can be tried.
 */
static int nfs41_check_session_ready(struct nfs_client *clp)
{
        int ret;

        if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
                ret = nfs4_client_recover_expired_lease(clp);
                if (ret)
                        return ret;
        }
        if (clp->cl_cons_state < NFS_CS_READY)
                return -EPROTONOSUPPORT;
        smp_rmb();
        return 0;
}

int nfs4_init_session(struct nfs_client *clp)
{
        if (!nfs4_has_session(clp))
                return 0;

        clear_bit(NFS4_SESSION_INITING, &clp->cl_session->session_state);
        return nfs41_check_session_ready(clp);
}

int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
{
        struct nfs4_session *session = clp->cl_session;
        int ret;

        spin_lock(&clp->cl_lock);
        if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
                /*
                 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
                 * DS lease to be equal to the MDS lease.
                 */
                clp->cl_lease_time = lease_time;
                clp->cl_last_renewal = jiffies;
        }
        spin_unlock(&clp->cl_lock);

        ret = nfs41_check_session_ready(clp);
        if (ret)
                return ret;
        /* Test for the DS role */
        if (!is_ds_client(clp))
                return -ENODEV;
        return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);

#endif /* defined(CONFIG_NFS_V4_1) */