// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK
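
/*
 * CB_GETATTR: while we hold a write delegation, the server cannot see
 * the file's current size and change attribute, so it asks us. Look up
 * the delegated inode by filehandle and answer from the delegation's
 * view of the attributes.
 */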
__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = timespec64_to_timespec(inode->i_ctime);
	res->mtime = timespec64_to_timespec(inode->i_mtime);
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}
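
/*
 * CB_RECALL: the server wants a delegation back. Find the delegated
 * inode and hand the actual return off to a helper thread, so the
 * callback reply does not block on writeback and DELEGRETURN.
 */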
__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)
/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (stateid != NULL &&
			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}
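
/*
 * Prefer the stateid lookup; fall back to the filehandle when no
 * layout matches the stateid. Both helpers run under cl_lock and the
 * RCU read lock, dropping them only around the iput() retry path.
 */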
static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}
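
/*
 * Handle CB_LAYOUTRECALL(FILE): commit outstanding layout changes,
 * validate the recall stateid, then mark the matching layout segments
 * for return. NFS4ERR_NOMATCHING_LAYOUT tells the server we hold
 * nothing it needs to wait for.
 */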
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;

		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	return rv;
}
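
/*
 * Handle CB_LAYOUTRECALL(FSID) and CB_LAYOUTRECALL(ALL) by destroying
 * every affected layout. As a "forgetful" client we then answer
 * NFS4ERR_NOMATCHING_LAYOUT, or NFS4ERR_DELAY if the destroy failed.
 */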
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}
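
/*
 * Main CB_LAYOUTRECALL entry point: dispatch on the recall type
 * (FILE, FSID or ALL) decoded from the arguments.
 */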
__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}
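
/*
 * CB_NOTIFY_DEVICEID: the server is telling us that pNFS device IDs
 * have changed or been deleted. For each notification, find a server
 * using the matching layout driver and drop the device ID from its
 * cache.
 */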
__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. On success the caller updates
 * the slot's sequence number.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer
 * guarantees a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs *args)
{
	if (args->csa_slotid > tbl->server_highest_slotid)
		return htonl(NFS4ERR_BADSLOT);

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			return htonl(NFS4ERR_DELAY);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		return htonl(NFS4ERR_SEQ_FALSE_RETRY);
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		return htonl(NFS4_OK);

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				 uint32_t nrclists,
				 struct referring_call_list *rclists,
				 spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
		}
	}
out:
	return status;
}
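
/*
 * CB_SEQUENCE: must be the first operation in every v4.1 callback
 * compound. Validate the session and slot, bump the slot's sequence
 * number, and stash the client and slot in cb_process_state for the
 * operations that follow.
 */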
__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls. If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				  &tbl->slot_tbl_lock) < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */

	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}
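
/* Reject RCA4 type masks containing bits we don't recognise */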
static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}
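
/*
 * CB_RECALL_ANY: the server is low on resources and asks us to return
 * unused read and/or write delegations, and possibly all layouts.
 */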
__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}
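
/*
 * CB_NOTIFY_LOCK: a lock we are waiting for has become available.
 * Wake the waiters on cl_lock_waitq; they re-check against the args.
 */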
__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */

#ifdef CONFIG_NFS_V4_2
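/* Copy the CB_OFFLOAD write result and verifier into our copy state */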
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
			      struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
			&args->wr_writeverf.verifier.data[0],
			NFS4_VERIFIER_SIZE);
	}
}
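
/*
 * CB_OFFLOAD: completion notification for an asynchronous COPY. Match
 * the stateid against a pending copy and complete it; if the callback
 * beat the COPY reply, park the result on pending_cb_stateids so the
 * reply path can find it.
 */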
__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
			if (memcmp(args->coa_stateid.other,
				   tmp_copy->stateid.other,
				   sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
	if (!found) {
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	return 0;
}
#endif /* CONFIG_NFS_V4_2 */