/*	$OpenBSD: nfs_bio.c,v 1.80 2015/03/14 03:38:52 jsg Exp $	*/
/*	$NetBSD: nfs_bio.c,v 1.25.4.2 1996/07/08 20:47:04 jtc Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/time.h>

#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern struct nfsstats nfsstats;
struct nfs_bufqhead nfs_bufq;
uint32_t nfs_bufqmax, nfs_bufqlen;
/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, diff;
	struct buf *bp = NULL, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, bn, rabn;
	caddr_t baddr;
	int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin;
	off_t offdiff;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	biosize = nmp->nm_rsize;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 */
	if (np->n_flag & NMODIFIED) {
		NFS_INVALIDATE_ATTRCACHE(np);
		error = VOP_GETATTR(vp, &vattr, cred, p);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, p);
		if (error)
			return (error);
		if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime;
		}
	}

	/*
	 * update the cache read creds for this vnode
	 */
	if (np->n_rcred)
		crfree(np->n_rcred);
	np->n_rcred = cred;
	crhold(cred);

	do {
		if ((vp->v_flag & VROOT) && vp->v_type == VLNK) {
			return (nfs_readlinkrpc(vp, uio, cred));
		}
		baddr = NULL;
		switch (vp->v_type) {
		case VREG:
			nfsstats.biocache_reads++;
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset & (biosize - 1);
			bn = lbn * (biosize / DEV_BSIZE);
			not_readin = 1;

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nfs_numasync > 0 && nmp->nm_readahead > 0) {
				for (nra = 0; nra < nmp->nm_readahead &&
				    (lbn + 1 + nra) * biosize < np->n_size; nra++) {
					rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
					if (!incore(vp, rabn)) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, p);
						if (!rabp)
							return (EINTR);
						if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
							rabp->b_flags |= (B_READ | B_ASYNC);
							if (nfs_asyncio(rabp, 1)) {
								rabp->b_flags |= B_INVAL;
								brelse(rabp);
							}
						} else
							brelse(rabp);
					}
				}
			}

again:
			bp = nfs_getcacheblk(vp, bn, biosize, p);
			if (!bp)
				return (EINTR);
			got_buf = 1;
			if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
				bp->b_flags |= B_READ;
				not_readin = 0;
				error = nfs_doio(bp, p);
				if (error) {
					brelse(bp);
					return (error);
				}
			}
			n = min((unsigned)(biosize - on), uio->uio_resid);
			offdiff = np->n_size - uio->uio_offset;
			if (offdiff < (off_t)n)
				n = (int)offdiff;
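			/*
			 * If this buffer was already in the cache (we did
			 * not just read it in) and it does not cover the
			 * requested range with valid data, push any dirty
			 * part to the server and re-read the block.
			 */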
			if (not_readin && n > 0) {
				if (on < bp->b_validoff || (on + n) > bp->b_validend) {
					bp->b_flags |= B_INVAFTERWRITE;
					if (bp->b_dirtyend > 0) {
						if ((bp->b_flags & B_DELWRI) == 0)
							panic("nfsbioread");
						if (VOP_BWRITE(bp) == EINTR)
							return (EINTR);
					} else
						brelse(bp);
					goto again;
				}
			}
			diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
			if (diff < n)
				n = diff;
			break;
		case VLNK:
			nfsstats.biocache_readlinks++;
			bp = nfs_getcacheblk(vp, 0, NFS_MAXPATHLEN, p);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				error = nfs_doio(bp, p);
				if (error) {
					brelse(bp);
					return (error);
				}
			}
			n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			got_buf = 1;
			on = 0;
			break;
		default:
			panic("nfsbioread: type %x unexpected", vp->v_type);
			break;
		}

		if (n > 0) {
			if (!baddr)
				baddr = bp->b_data;
			error = uiomovei(baddr + on, (int)n, uio);
		}

		if (vp->v_type == VLNK)
			n = 0;
		if (got_buf)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}
/*
 * Vnode op for write using bio
 */
int
nfs_write(void *v)
{
	struct vop_write_args *ap = v;
	int biosize;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, bn;
	int n, on, error = 0, extended = 0, wrotedta = 0, truncated = 0;
	ssize_t overrun;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);

	/* do the filesize rlimit check */
	if ((error = vn_fsizechk(vp, uio, ioflag, &overrun)))
		return (error);

	/*
	 * update the cache write creds for this node.
	 */
	if (np->n_wcred)
		crfree(np->n_wcred);
	np->n_wcred = cred;
	crhold(cred);

	/*
	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpc's.
	 */
	biosize = nmp->nm_rsize;
	do {
		/*
		 * XXX make sure we aren't cached in the VM page cache
		 */
		uvm_vnp_uncache(vp);

		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		bn = lbn * (biosize / DEV_BSIZE);
again:
		bp = nfs_getcacheblk(vp, bn, biosize, p);
		if (!bp) {
			error = EINTR;
			goto out;
		}
		np->n_flag |= NMODIFIED;
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			uvm_vnp_setsize(vp, (u_long)np->n_size);
			extended = 1;
		} else if (uio->uio_offset + n < np->n_size)
			truncated = 1;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (VOP_BWRITE(bp) == EINTR) {
				error = EINTR;
				goto out;
			}
			goto again;
		}

		error = uiomovei((char *)bp->b_data + on, n, uio);
		if (error) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			goto out;
		}
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
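
		/*
		 * Grow the valid range to cover the bytes just dirtied.
		 * If the old valid range does not touch the new dirty
		 * range, reset it to exactly the dirty range so stale
		 * bytes are never advertised as valid.
		 */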
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}

		wrotedta = 1;

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.
		 */
		if (NFS_ISV3(vp)) {
			rw_enter_write(&np->n_commitlock);
			if (bp->b_flags & B_NEEDCOMMIT) {
				bp->b_flags &= ~B_NEEDCOMMIT;
				nfs_del_tobecommitted_range(vp, bp);
			}
			nfs_del_committed_range(vp, bp);
			rw_exit_write(&np->n_commitlock);
		} else
			bp->b_flags &= ~B_NEEDCOMMIT;

		if (ioflag & IO_SYNC) {
			bp->b_proc = p;
			error = VOP_BWRITE(bp);
			if (error)
				goto out;
		} else if ((n + on) == biosize) {
			bp->b_proc = NULL;
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	/*out: XXX belongs here??? */
	if (wrotedta)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0) |
		    (truncated ? NOTE_TRUNCATE : 0));
out:
	/* correct the result for writes clamped by vn_fsizechk() */
	uio->uio_resid += overrun;
	return (error);
}
/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct proc *p)
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
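		/*
		 * On an interruptible mount, let the first getblk() sleep
		 * be broken by a signal (PCATCH); afterwards poll every
		 * two seconds and give up if a signal is pending.
		 */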
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, p))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}
/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
{
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsnode *np = VTONFS(vp);
	int error, sintr, stimeo;

	error = sintr = stimeo = 0;

	if (ISSET(nmp->nm_flag, NFSMNT_INT)) {
		sintr = PCATCH;
		stimeo = 2 * hz;
	}

	/* First wait for any other process doing a flush to complete. */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO|sintr, "nfsvinval", stimeo);
		if (error && sintr && nfs_sigintr(nmp, NULL, p))
			return (EINTR);
	}

	/* Now, flush as required. */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, sintr, 0);
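
	/*
	 * If the flush fails, keep retrying with a timeout.  On an
	 * interruptible mount, give up on a pending signal: clear
	 * NFLUSHINPROG, wake any waiters and return EINTR.
	 */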
	while (error) {
		if (sintr && nfs_sigintr(nmp, NULL, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, stimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (0);
}
/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */
int
nfs_asyncio(struct buf *bp, int readahead)
{
	if (nfs_numasync == 0)
		goto out;
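
	/*
	 * Throttle on the queue limit: writers sleep until an nfsiod
	 * drains the queue, but readaheads are not worth waiting for
	 * and fail immediately so the caller can simply drop them.
	 */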
	while (nfs_bufqlen > nfs_bufqmax)
		if (readahead)
			goto out;
		else
			tsleep(&nfs_bufqlen, PRIBIO, "nfs_bufq", 0);

	if ((bp->b_flags & B_READ) == 0) {
		bp->b_flags |= B_WRITEINPROG;
	}

	TAILQ_INSERT_TAIL(&nfs_bufq, bp, b_freelist);
	nfs_bufqlen++;

	wakeup_one(&nfs_bufq);
	return (0);

out:
	nfsstats.forcedsync++;
	return (EIO);
}
/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct buf *bp, struct proc *p)
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int s, error = 0, diff, len, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more.
	 */
	if (bp->b_flags & B_PHYS) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		/* mapping was done by vmapbuf() */
		io.iov_base = bp->b_data;
		uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
		if (bp->b_flags & B_READ) {
			uiop->uio_rw = UIO_READ;
			nfsstats.read_physios++;
			error = nfs_readrpc(vp, uiop);
		} else {
			iomode = NFSV3WRITE_DATASYNC;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_physios++;
			error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else if (bp->b_flags & B_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
			nfsstats.read_bios++;
			bcstats.pendingreads++;
			bcstats.numreads++;
			error = nfs_readrpc(vp, uiop);
			if (!error) {
				bp->b_validoff = 0;
				if (uiop->uio_resid) {
					/*
					 * If len > 0, there is a hole in the file and
					 * no writes after the hole have been pushed to
					 * the server yet.
					 * Just zero fill the rest of the valid area.
					 */
					diff = bp->b_bcount - uiop->uio_resid;
					len = np->n_size - ((((off_t)bp->b_blkno) << DEV_BSHIFT)
					    + diff);
					if (len > 0) {
						len = min(len, uiop->uio_resid);
						memset((char *)bp->b_data + diff, 0, len);
						bp->b_validend = diff + len;
					} else
						bp->b_validend = diff;
				} else
					bp->b_validend = bp->b_bcount;
			}
			if (p && (vp->v_flag & VTEXT) &&
			    (timespeccmp(&np->n_mtime, &np->n_vattr.va_mtime, !=))) {
				uprintf("Process killed due to text file modification\n");
				psignal(p, SIGKILL);
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			nfsstats.readlink_bios++;
			bcstats.pendingreads++;
			bcstats.numreads++;
			error = nfs_readlinkrpc(vp, uiop, curproc->p_ucred);
			break;
		default:
			panic("nfs_doio: type %x unexpected", vp->v_type);
			break;
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;
		bcstats.pendingwrites++;
		bcstats.numwrites++;
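
		/*
		 * Plain async writes (not already marked B_NEEDCOMMIT or
		 * B_NOCACHE) go out UNSTABLE so the server can reply before
		 * reaching stable storage; everything else uses FILESYNC.
		 */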
		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE)) == B_ASYNC)
			iomode = NFSV3WRITE_UNSTABLE;
		else
			iomode = NFSV3WRITE_FILESYNC;
		bp->b_flags |= B_WRITEINPROG;
		error = nfs_writerpc(vp, uiop, &iomode, &must_commit);

		rw_enter_write(&np->n_commitlock);
		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
			bp->b_flags |= B_NEEDCOMMIT;
			nfs_add_tobecommitted_range(vp, bp);
		} else {
			bp->b_flags &= ~B_NEEDCOMMIT;
			nfs_del_committed_range(vp, bp);
		}
		rw_exit_write(&np->n_commitlock);

		bp->b_flags &= ~B_WRITEINPROG;

		/*
		 * For an interrupted write, the buffer is still valid and the
		 * write hasn't been pushed to the server yet, so we can't set
		 * B_ERROR and report the interruption by setting B_EINTR. For
		 * the B_ASYNC case, B_EINTR is not relevant, so the rpc attempt
		 * is essentially a noop.
		 * For the case of a V3 write rpc not being committed to stable
		 * storage, the block is still dirty and requires either a commit
		 * rpc or another write rpc with iomode == NFSV3WRITE_FILESYNC
		 * before the block is reused. This is indicated by setting the
		 * B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			s = splbio();
			buf_dirty(bp);
			splx(s);

			if (!(bp->b_flags & B_ASYNC) && error)
				bp->b_flags |= B_EINTR;
		} else {
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error = np->n_error = error;
				np->n_flag |= NWRITEERR;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	s = splbio();
	biodone(bp);
	splx(s);
	return (error);
}