/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data. Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols. Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache. A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache. The client does not
 * correct unaligned requests from applications. All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files. Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
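/*
 * Illustrative note (not upstream commentary): a minimal userspace
 * sketch of how an application requests the uncached I/O path described
 * above. The 4096-byte alignment is a typical O_DIRECT assumption, not
 * something this client enforces (define _GNU_SOURCE for O_DIRECT):
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);	// aligned user buffer
 *	pread(fd, buf, 4096, 0);	// served by nfs_file_direct_read()
 *	pwrite(fd, buf, 4096, 0);	// on stable storage before return
 */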
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS
static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_mirror {
	ssize_t count;
};

struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context	*l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */

	struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
	int			mirror_count;

	loff_t			io_start;	/* Start offset for I/O */
	ssize_t			count,		/* bytes actually processed */
				max_count,	/* max expected count */
				bytes_left,	/* bytes left to be sent */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};
static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}
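/*
 * Illustrative note on io_count (an editorial sketch of the pattern used
 * throughout this file, not upstream commentary): the scheduling path
 * takes one reference before dispatching any RPCs, and each in-flight
 * pageio header takes another via .init_hdr, so completion runs exactly
 * once, whichever context drops the last reference:
 *
 *	get_dreq(dreq);			// scheduler's reference
 *	...dispatch I/O; each hdr calls get_dreq() via init_hdr...
 *	if (put_dreq(dreq))		// true only for the last dropper
 *		nfs_direct_complete(dreq);
 */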
static void
nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
{
	int i;
	ssize_t count;

	WARN_ON_ONCE(dreq->count >= dreq->max_count);

	if (dreq->mirror_count == 1) {
		dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
		dreq->count += hdr->good_bytes;
	} else {
		/* mirrored writes */
		count = dreq->mirrors[hdr->pgio_mirror_idx].count;
		if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
			count = hdr->io_start + hdr->good_bytes - dreq->io_start;
			dreq->mirrors[hdr->pgio_mirror_idx].count = count;
		}
		/* update the dreq->count by finding the minimum agreed count from all
		 * mirrors */
		count = dreq->mirrors[0].count;

		for (i = 1; i < dreq->mirror_count; i++)
			count = min(count, dreq->mirrors[i].count);

		dreq->count = count;
	}
}
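/*
 * Worked example (illustrative, not upstream commentary) of the mirrored
 * branch above: with two mirrors, if mirror 0 has acknowledged bytes
 * [0, 8192) and mirror 1 only [0, 4096), then dreq->count becomes 4096.
 * Only bytes that every mirror has confirmed are reported back to the
 * application.
 */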
/*
 * nfs_direct_select_verf - select the right verifier
 * @dreq - direct request possibly spanning multiple servers
 * @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs
 * @commit_idx - commit bucket index for the DS
 *
 * returns the correct verifier to use given the role of the server
 */
static struct nfs_writeverf *
nfs_direct_select_verf(struct nfs_direct_req *dreq,
		       struct nfs_client *ds_clp,
		       int commit_idx)
{
	struct nfs_writeverf *verfp = &dreq->verf;

#ifdef CONFIG_NFS_V4_1
	/*
	 * pNFS is in use: use the DS verf, except when commit_through_mds
	 * is set for a layout segment, in which case nbuckets is zero.
	 */
	if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
		if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
			verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
		else
			WARN_ON_ONCE(1);
	}
#endif
	return verfp;
}
/*
 * nfs_direct_set_hdr_verf - set the write/commit verifier
 * @dreq - direct request possibly spanning multiple servers
 * @hdr - pageio header to validate against previously seen verfs
 *
 * Set the server's (MDS or DS) "seen" verifier
 */
static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
				    struct nfs_pgio_header *hdr)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
	WARN_ON_ONCE(verfp->committed >= 0);
	memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
	WARN_ON_ONCE(verfp->committed < 0);
}

static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1,
			       const struct nfs_writeverf *v2)
{
	return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier);
}

/*
 * nfs_direct_set_or_cmp_hdr_verf - set or compare verifier for pgio header
 * @dreq - direct request possibly spanning multiple servers
 * @hdr - pageio header to validate against previously seen verf
 *
 * Set the server's "seen" verf if not initialized.
 * returns result of comparison between @hdr->verf and the "seen"
 * verf of the server used by @hdr (DS or MDS)
 */
static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
					  struct nfs_pgio_header *hdr)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
	if (verfp->committed < 0) {
		nfs_direct_set_hdr_verf(dreq, hdr);
		return 0;
	}
	return nfs_direct_cmp_verf(verfp, &hdr->verf);
}
/*
 * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
 * @dreq - direct request possibly spanning multiple servers
 * @data - commit data to validate against previously seen verf
 *
 * returns result of comparison between @data->verf and the verf of
 * the server used by @data (DS or MDS)
 */
static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
					   struct nfs_commit_data *data)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, data->ds_clp,
					 data->ds_commit_index);

	/* verifier not set so always fail */
	if (verfp->committed < 0)
		return 1;

	return nfs_direct_cmp_verf(verfp, &data->verf);
}
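/*
 * Illustrative note on the verifier checks above (editorial): an NFS
 * server returns the same write verifier for the life of one boot
 * instance. If the verifier in a WRITE or COMMIT reply differs from the
 * one recorded earlier, the server may have rebooted and discarded
 * unstable data, so the safe response (taken by the callers below) is to
 * set NFS_ODIRECT_RESCHED_WRITES and resend the affected pages.
 */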
/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, for most direct IO, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* we only support swap file calling nfs_direct_IO */
	if (!IS_SWAPFILE(inode))
		return 0;

	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

	if (iov_iter_rw(iter) == READ)
		return nfs_file_direct_read(iocb, iter);
	return nfs_file_direct_write(iocb, iter);
}
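/*
 * Illustrative note (editorial): the VM_BUG_ON above encodes the
 * assumption that the swap code always drives this entry point one page
 * at a time. Regular O_DIRECT reads and writes never arrive here; they
 * enter through nfs_file_direct_read() and nfs_file_direct_write() below.
 */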
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->inode = dreq->inode;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}
static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
					      struct nfs_pageio_descriptor *pgio,
					      struct nfs_page *req)
{
	int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	dreq->mirror_count = mirror_count;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	dreq->verf.committed = NFS_INVALID_STABLE_HOW;	/* not set yet */
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	dreq->mirror_count = 1;
	spin_lock_init(&dreq->lock);

	return dreq;
}
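/*
 * Illustrative note (editorial): nfs_direct_req_alloc() deliberately
 * leaves the kref at two (kref_init() sets it to one, kref_get() bumps
 * it). One reference belongs to the issuing system call, dropped in the
 * out_release paths of nfs_file_direct_read()/nfs_file_direct_write();
 * the other belongs to the I/O machinery, dropped by
 * nfs_direct_complete().
 */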
static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
	return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result) {
		result = dreq->count;
		WARN_ON_ONCE(dreq->count < 0);
	}
	if (!result)
		result = dreq->error;

out:
	return (ssize_t) result;
}
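/*
 * Illustrative note (editorial): for async (aio) requests,
 * nfs_direct_wait() returns -EIOCBQUEUED immediately and the final
 * result is delivered later through dreq->iocb->ki_complete() in
 * nfs_direct_complete(); only synchronous callers block on
 * dreq->completion here.
 */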
/*
 * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;

	inode_dio_end(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (dreq->count != 0) {
			res = (long) dreq->count;
			WARN_ON_ONCE(dreq->count < 0);
		}
		dreq->iocb->ki_complete(dreq->iocb, res, 0);
	}

	complete(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
	dprintk("NFS: direct read done (%s/%llu %d@%lld)\n",
		req->wb_context->dentry->d_sb->s_id,
		(unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_release_request(req);
}
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		nfs_direct_good_bytes(dreq, hdr);
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes)
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_direct_readpage_release(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}
static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};
/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation. If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more reads. Read length accounting is
 * handled by nfs_direct_read_completion(). Otherwise, if no requests
 * have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      struct iov_iter *iter,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	inode_dio_begin(inode);

	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  rsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return 0;
}
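/*
 * Worked example of the pagevec arithmetic above (illustrative, not
 * upstream commentary): if iov_iter_get_pages_alloc() pins 6000 bytes
 * starting at pgbase 192 (4096-byte pages), then
 * npages = (6000 + 192 + 4095) / 4096 = 2. The first nfs_page covers
 * the 4096 - 192 = 3904 bytes remaining in page 0, and the second
 * covers the other 2096 bytes from offset 0 of page 1, after which
 * pgbase and bytes have both reached 0.
 */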
/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid its check to see if
 * the request starts before the end of the file. For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change. Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	ssize_t result = -EINVAL;
	size_t count = iov_iter_count(iter);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		file, count, (long long) iocb->ki_pos);

	result = 0;
	if (!count)
		goto out;

	task_io_account_read(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = iocb->ki_pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_start_io_direct(inode);

	NFS_I(inode)->read_io += count;
	result = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

	nfs_end_io_direct(inode);

	if (!result) {
		result = nfs_direct_wait(dreq);
		if (result > 0)
			iocb->ki_pos += result;
	}

out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}
static void
nfs_direct_write_scan_commit_list(struct inode *inode,
				  struct list_head *list,
				  struct nfs_commit_info *cinfo)
{
	spin_lock(&cinfo->inode->i_lock);
#ifdef CONFIG_NFS_V4_1
	if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
		NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
#endif
	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
	spin_unlock(&cinfo->inode->i_lock);
}
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);
	int i;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	dreq->count = 0;
	dreq->verf.committed = NFS_INVALID_STABLE_HOW;
	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
	for (i = 0; i < dreq->mirror_count; i++)
		dreq->mirrors[i].count = 0;
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	req = nfs_list_entry(reqs.next);
	nfs_direct_setup_mirroring(dreq, &desc, req);
	if (desc.pg_error < 0) {
		list_splice_init(&reqs, &failed);
		goto out_failed;
	}

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_remove_request(req);
			nfs_list_add_request(req, &failed);
			spin_lock(&cinfo.inode->i_lock);
			dreq->flags = 0;
			if (desc.pg_error < 0)
				dreq->error = desc.pg_error;
			else
				dreq->error = -EIO;
			spin_unlock(&cinfo.inode->i_lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

out_failed:
	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
}
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
			data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (nfs_direct_cmp_commit_data_verf(dreq, data)) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq, data->inode);
}
static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{
	struct nfs_direct_req *dreq = cinfo->dreq;

	spin_lock(&dreq->lock);
	dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
	nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
			nfs_direct_complete(dreq);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}
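/*
 * Illustrative summary of the write state machine driven above
 * (editorial): dreq->flags starts at 0. A stable reply leaves it at 0,
 * so completion zaps the mapping and finishes. An unstable reply sets
 * NFS_ODIRECT_DO_COMMIT, and completion sends a COMMIT via
 * nfs_direct_commit_schedule(). A verifier mismatch or a failed commit
 * sets NFS_ODIRECT_RESCHED_WRITES, and completion resends the data
 * FLUSH_STABLE via nfs_direct_write_reschedule().
 */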
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	bool request_commit = false;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
		dreq->error = hdr->error;
	if (dreq->error == 0) {
		nfs_direct_good_bytes(dreq, hdr);
		if (nfs_write_need_commit(hdr)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				request_commit = true;
			else if (dreq->flags == 0) {
				nfs_direct_set_hdr_verf(dreq, hdr);
				request_commit = true;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				request_commit = true;
				if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
					dreq->flags =
						NFS_ODIRECT_RESCHED_WRITES;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (request_commit) {
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->ds_commit_idx);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
	hdr->release(hdr);
}
static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (dreq->error == 0) {
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		/* fake unstable write to let common nfs resend pages */
		hdr->verf.committed = NFS_UNSTABLE;
		hdr->good_bytes = hdr->args.count;
	}
	spin_unlock(&dreq->lock);
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
	.reschedule_io = nfs_direct_write_reschedule_io,
};
/*
 * NB: Return the value of the first error return code. Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation. If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more writes. Write length accounting is
 * handled by nfs_direct_write_completion(). Otherwise, if no requests
 * have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	inode_dio_begin(inode);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}

			nfs_direct_setup_mirroring(dreq, &desc, req);
			if (desc.pg_error < 0) {
				nfs_free_request(req);
				result = desc.pg_error;
				break;
			}

			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size. The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache. We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t result = -EINVAL;
	size_t count;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t pos, end;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		file, iov_iter_count(iter), (long long) iocb->ki_pos);

	result = generic_write_checks(iocb, iter);
	if (result <= 0)
		return result;
	count = result;
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

	task_io_account_write(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_start_io_direct(inode);

	result = nfs_direct_write_schedule_iovec(dreq, iter, pos);

	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_SHIFT, end);
	}

	nfs_end_io_direct(inode);

	if (!result) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			iocb->ki_pos = pos + result;
			/* XXX: should check the generic_write_sync retval */
			generic_write_sync(iocb, result);
		}
	}
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}
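/*
 * Illustrative note on durability in nfs_file_direct_write() (editorial):
 * the schedule path issues FLUSH_COND_STABLE writes, the completion work
 * sends a COMMIT when the server replied unstable, and generic_write_sync()
 * covers O_SYNC/O_DSYNC semantics, so a synchronous direct write does not
 * return until all requested bytes are on stable storage (matching the
 * guarantee stated at the top of this file).
 */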
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}