
/*
 * file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc.
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <asm/page.h>
#include <asm/uaccess.h>

#include "attrib.h"
#include "bitmap.h"
#include "inode.h"
#include "debug.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"

/**
 * ntfs_file_open - called when an inode is about to be opened
 * @vi:		inode to be opened
 * @filp:	file structure describing the inode
 *
 * Limit file size to the page cache limit on architectures where unsigned long
 * is 32-bits. This is the most we can do for now without overflowing the page
 * cache page index. Doing it this way means we don't run into problems because
 * of existing too large files. It would be better to allow the user to read
 * the beginning of the file but I doubt very much anyone is going to hit this
 * check on a 32-bit architecture, so there is no point in adding the extra
 * complexity required to support this.
 *
 * On 64-bit architectures, the check is hopefully optimized away by the
 * compiler.
 *
 * After the check passes, just call generic_file_open() to do its work.
 */
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
	if (sizeof(unsigned long) < 8) {
		if (i_size_read(vi) > MAX_LFS_FILESIZE)
			return -EOVERFLOW;
	}
	return generic_file_open(vi, filp);
}
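
/*
 * Illustration (hedged; the exact constants depend on the architecture and
 * kernel version): with 4 KiB pages and a 32-bit unsigned long,
 * MAX_LFS_FILESIZE works out to (4096 << 31) - 1 bytes, i.e. just under
 * 8 TiB, because the page cache index must fit in an unsigned long. An
 * existing NTFS file larger than that makes the open() above fail with
 * -EOVERFLOW rather than risk wrapping the page index later on.
 */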

#ifdef NTFS_RW

/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni:			ntfs inode of the attribute to extend
 * @new_init_size:	requested new initialized size in bytes
 *
 * Extend the initialized size of an attribute described by the ntfs inode @ni
 * to @new_init_size bytes. This involves zeroing any non-sparse space between
 * the old initialized size and @new_init_size both in the page cache and on
 * disk (if relevant complete pages are already uptodate in the page cache then
 * these are simply marked dirty).
 *
 * As a side-effect, the file size (vfs inode->i_size) may be incremented as,
 * in the resident attribute case, it is tied to the initialized size and, in
 * the non-resident attribute case, it may not fall below the initialized size.
 *
 * Note that if the attribute is resident, we do not need to touch the page
 * cache at all. This is because if the page cache page is not uptodate we
 * bring it uptodate later, when doing the write to the mft record since we
 * then already have the page mapped. And if the page is uptodate, the
 * non-initialized region will already have been zeroed when the page was
 * brought uptodate and the region may in fact already have been overwritten
 * with new data via mmap() based writes, so we cannot just zero it. And since
 * POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped
 * is unspecified, we choose not to do zeroing and thus we do not need to touch
 * the page at all. For a more detailed explanation see ntfs_truncate() in
 * fs/ntfs/inode.c.
 *
 * Return 0 on success and -errno on error. In the case that an error is
 * encountered it is possible that the initialized size will already have been
 * incremented some way towards @new_init_size but it is guaranteed that if
 * this is the case, the necessary zeroing will also have happened and that all
 * metadata is self-consistent.
 *
 * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must
 * be held by the caller.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
{
	s64 old_init_size;
	loff_t old_i_size;
	pgoff_t index, end_index;
	unsigned long flags;
	struct inode *vi = VFS_I(ni);
	ntfs_inode *base_ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *kattr;
	int err;
	u32 attr_len;

	read_lock_irqsave(&ni->size_lock, flags);
	old_init_size = ni->initialized_size;
	old_i_size = i_size_read(vi);
	BUG_ON(new_init_size > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"old_initialized_size 0x%llx, "
			"new_initialized_size 0x%llx, i_size 0x%llx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)old_init_size,
			(unsigned long long)new_init_size, old_i_size);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Use goto to reduce indentation and we need the label below anyway. */
	if (NInoNonResident(ni))
		goto do_non_resident_extend;
	BUG_ON(old_init_size != old_i_size);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(old_i_size != (loff_t)attr_len);
	/*
	 * Do the zeroing in the mft record and update the attribute size in
	 * the mft record.
	 */
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	memset(kattr + attr_len, 0, new_init_size - attr_len);
	a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
	/* Finally, update the sizes in the vfs and ntfs inodes. */
	write_lock_irqsave(&ni->size_lock, flags);
	i_size_write(vi, new_init_size);
	ni->initialized_size = new_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
	goto done;
do_non_resident_extend:
	/*
	 * If the new initialized size @new_init_size exceeds the current file
	 * size (vfs inode->i_size), we need to extend the file size to the
	 * new initialized size.
	 */
	if (new_init_size > old_i_size) {
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			m = NULL;
			goto err_out;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		m = ctx->mrec;
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		BUG_ON(old_i_size != (loff_t)
				sle64_to_cpu(a->data.non_resident.data_size));
		a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* Update the file size in the vfs inode. */
		i_size_write(vi, new_init_size);
		ntfs_attr_put_search_ctx(ctx);
		ctx = NULL;
		unmap_mft_record(base_ni);
		m = NULL;
	}
	mapping = vi->i_mapping;
	index = old_init_size >> PAGE_SHIFT;
	end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	do {
		/*
		 * Read the page. If the page is not present, this will zero
		 * the uninitialized regions for us.
		 */
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto init_err_out;
		}
		if (unlikely(PageError(page))) {
			put_page(page);
			err = -EIO;
			goto init_err_out;
		}
		/*
		 * Update the initialized size in the ntfs inode. This is
		 * enough to make ntfs_writepage() work.
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT;
		if (ni->initialized_size > new_init_size)
			ni->initialized_size = new_init_size;
		write_unlock_irqrestore(&ni->size_lock, flags);
		/* Set the page dirty so it gets written out. */
		set_page_dirty(page);
		put_page(page);
		/*
		 * Play nice with the vm and the rest of the system. This is
		 * very much needed as we can potentially be modifying the
		 * initialised size from a very small value to a really huge
		 * value, e.g.
		 *	f = open(somefile, O_TRUNC);
		 *	truncate(f, 10GiB);
		 *	seek(f, 10GiB);
		 *	write(f, 1);
		 * And this would mean we would be marking dirty hundreds of
		 * thousands of pages or as in the above example more than
		 * two and a half million pages!
		 *
		 * TODO: For sparse pages could optimize this workload by using
		 * the FsMisc / MiscFs page bit as a "PageIsSparse" bit. This
		 * would be set in readpage for sparse pages and here we would
		 * not need to mark dirty any pages which have this bit set.
		 * The only caveat is that we have to clear the bit everywhere
		 * where we allocate any clusters that lie in the page or that
		 * contain the page.
		 *
		 * TODO: An even greater optimization would be for us to only
		 * call readpage() on pages which are not in sparse regions as
		 * determined from the runlist. This would greatly reduce the
		 * number of pages we read and make dirty in the case of sparse
		 * files.
		 */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (++index < end_index);
	read_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(ni->initialized_size != new_init_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	/* Now bring in sync the initialized_size in the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto init_err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto init_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto init_err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
			(unsigned long long)new_init_size, i_size_read(vi));
	return 0;
init_err_out:
	write_lock_irqsave(&ni->size_lock, flags);
	ni->initialized_size = old_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Failed. Returning error code %i.", err);
	return err;
}
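
/*
 * Usage sketch (this mirrors the call site in ntfs_prepare_file_for_write()
 * below and is shown here only to illustrate the contract): a write that
 * starts beyond the initialized size first zeroes the gap, e.g. for a 1-byte
 * write at pos 0x100000 into an attribute with initialized_size 0x2000:
 *
 *	if (pos > ni->initialized_size)
 *		err = ntfs_attr_extend_initialized(ni, pos);
 *
 * On success, every non-sparse byte in [0x2000, 0x100000) reads back as
 * zero, both through the page cache and on disk.
 */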

static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb,
		struct iov_iter *from)
{
	loff_t pos;
	s64 end, ll;
	ssize_t err;
	unsigned long flags;
	struct file *file = iocb->ki_filp;
	struct inode *vi = file_inode(file);
	ntfs_inode *base_ni, *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;

	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
			"0x%llx, count 0x%zx.", vi->i_ino,
			(unsigned)le32_to_cpu(ni->type),
			(unsigned long long)iocb->ki_pos,
			iov_iter_count(from));
	err = generic_write_checks(iocb, from);
	if (unlikely(err <= 0))
		goto out;
	/*
	 * All checks have passed. Before we start doing any writing we want
	 * to abort any totally illegal writes.
	 */
	BUG_ON(NInoMstProtected(ni));
	BUG_ON(ni->type != AT_DATA);
	/* If file is encrypted, deny access, just like NT4. */
	if (NInoEncrypted(ni)) {
		/* Only $DATA attributes can be encrypted. */
		/*
		 * Reminder for later: Encrypted files are _always_
		 * non-resident so that the content can always be encrypted.
		 */
		ntfs_debug("Denying write access to encrypted file.");
		err = -EACCES;
		goto out;
	}
	if (NInoCompressed(ni)) {
		/* Only unnamed $DATA attribute can be compressed. */
		BUG_ON(ni->name_len);
		/*
		 * Reminder for later: If resident, the data is not actually
		 * compressed. Only on the switch to non-resident does
		 * compression kick in. This is in contrast to encrypted files
		 * (see above).
		 */
		ntfs_error(vi->i_sb, "Writing to compressed files is not "
				"implemented yet. Sorry.");
		err = -EOPNOTSUPP;
		goto out;
	}
	base_ni = ni;
	if (NInoAttr(ni))
		base_ni = ni->ext.base_ntfs_ino;
	err = file_remove_privs(file);
	if (unlikely(err))
		goto out;
	/*
	 * Our ->update_time method always succeeds thus file_update_time()
	 * cannot fail either so there is no need to check the return code.
	 */
	file_update_time(file);
	pos = iocb->ki_pos;
	/* The first byte after the last cluster being written to. */
	end = (pos + iov_iter_count(from) + vol->cluster_size_mask) &
			~(u64)vol->cluster_size_mask;
	/*
	 * If the write goes beyond the allocated size, extend the allocation
	 * to cover the whole of the write, rounded up to the nearest cluster.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->allocated_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end > ll) {
		/*
		 * Extend the allocation without changing the data size.
		 *
		 * Note we ensure the allocation is big enough to at least
		 * write some data but we do not require the allocation to be
		 * complete, i.e. it may be partial.
		 */
		ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
		if (likely(ll >= 0)) {
			BUG_ON(pos >= ll);
			/* If the extension was partial, truncate the write. */
			if (end > ll) {
				ntfs_debug("Truncating write to inode 0x%lx, "
						"attribute type 0x%x, because "
						"the allocation was only "
						"partially extended.",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type));
				iov_iter_truncate(from, ll - pos);
			}
		} else {
			err = ll;
			read_lock_irqsave(&ni->size_lock, flags);
			ll = ni->allocated_size;
			read_unlock_irqrestore(&ni->size_lock, flags);
			/* Perform a partial write if possible or fail. */
			if (pos < ll) {
				ntfs_debug("Truncating write to inode 0x%lx "
						"attribute type 0x%x, because "
						"extending the allocation "
						"failed (error %d).",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type),
						(int)-err);
				iov_iter_truncate(from, ll - pos);
			} else {
				if (err != -ENOSPC)
					ntfs_error(vi->i_sb, "Cannot perform "
							"write to inode "
							"0x%lx, attribute "
							"type 0x%x, because "
							"extending the "
							"allocation failed "
							"(error %ld).",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type),
							(long)-err);
				else
					ntfs_debug("Cannot perform write to "
							"inode 0x%lx, "
							"attribute type 0x%x, "
							"because there is no "
							"space left.",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type));
				goto out;
			}
		}
	}
	/*
	 * If the write starts beyond the initialized size, extend it up to the
	 * beginning of the write and initialize all non-sparse space between
	 * the old initialized size and the new one. This automatically also
	 * increments the vfs inode->i_size to keep it above or equal to the
	 * initialized_size.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (pos > ll) {
		/*
		 * Wait for ongoing direct i/o to complete before proceeding.
		 * New direct i/o cannot start as we hold i_mutex.
		 */
		inode_dio_wait(vi);
		err = ntfs_attr_extend_initialized(ni, pos);
		if (unlikely(err < 0))
			ntfs_error(vi->i_sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"extending the initialized size "
					"failed (error %d).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type),
					(int)-err);
	}
out:
	return err;
}
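
/*
 * Worked example (hypothetical numbers): with 4 KiB clusters, a write of
 * 0x5000 bytes at pos 0x1800 gives end = (0x1800 + 0x5000 + 0xfff) &
 * ~0xfff = 0x7000. If ntfs_attr_extend_allocation() manages to extend the
 * allocation only to ll = 0x5000, the iov_iter is truncated to ll - pos =
 * 0x3800 bytes and the resulting short write is left for the caller to
 * deal with.
 */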

/**
 * __ntfs_grab_cache_pages - obtain a number of locked pages
 * @mapping:	address space mapping from which to obtain page cache pages
 * @index:	starting index in @mapping at which to begin obtaining pages
 * @nr_pages:	number of page cache pages to obtain
 * @pages:	array of pages in which to return the obtained page cache pages
 * @cached_page: allocated but as yet unused page
 *
 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
 * starting at index @index.
 *
 * If a page is newly created, add it to the LRU list.
 *
 * Note, the page locks are obtained in ascending page index order.
 */
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
		pgoff_t index, const unsigned nr_pages, struct page **pages,
		struct page **cached_page)
{
	int err, nr;

	BUG_ON(!nr_pages);
	err = nr = 0;
	do {
		pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK |
				FGP_ACCESSED);
		if (!pages[nr]) {
			if (!*cached_page) {
				*cached_page = page_cache_alloc(mapping);
				if (unlikely(!*cached_page)) {
					err = -ENOMEM;
					goto err_out;
				}
			}
			err = add_to_page_cache_lru(*cached_page, mapping,
					index,
					mapping_gfp_constraint(mapping, GFP_KERNEL));
			if (unlikely(err)) {
				if (err == -EEXIST)
					continue;
				goto err_out;
			}
			pages[nr] = *cached_page;
			*cached_page = NULL;
		}
		index++;
		nr++;
	} while (nr < nr_pages);
out:
	return err;
err_out:
	while (nr > 0) {
		unlock_page(pages[--nr]);
		put_page(pages[nr]);
	}
	goto out;
}
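
/*
 * Usage sketch (hedged; the real caller is the buffered write path, which is
 * not shown in this excerpt): @cached_page lets one preallocated page
 * survive across iterations, so losing an add_to_page_cache_lru() race
 * (-EEXIST) costs nothing but a retry:
 *
 *	struct page *pages[2], *cached_page = NULL;
 *	int err = __ntfs_grab_cache_pages(mapping, index, 2, pages,
 *			&cached_page);
 *	...
 *	if (cached_page)
 *		put_page(cached_page);
 */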

static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(REQ_OP_READ, 0, bh);
}

/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_mutex held on the inode (@pages[0]->mapping->host). There are
 * @nr_pages pages in @pages which are locked but not kmap()ped. The source
 * data has not yet been copied into the @pages.
 *
 * Need to fill any holes with actual clusters, allocate buffers if necessary,
 * ensure all the buffers are mapped, and bring uptodate any buffers that are
 * only partially being written to.
 *
 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
 * greater than PAGE_SIZE, that all pages in @pages are entirely inside
 * the same cluster and that they are the entirety of that cluster, and that
 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
 *
 * i_size is not to be modified yet.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
		unsigned nr_pages, s64 pos, size_t bytes)
{
	VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
	LCN lcn;
	s64 bh_pos, vcn_len, end, initialized_size;
	sector_t lcn_block;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni, *base_ni = NULL;
	ntfs_volume *vol;
	runlist_element *rl, *rl2;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a = NULL;
	unsigned long flags;
	u32 attr_rec_len = 0;
	unsigned blocksize, u;
	int err, mp_size;
	bool rl_write_locked, was_hole, is_retry;
	unsigned char blocksize_bits;
	struct {
		u8 runlist_merged:1;
		u8 mft_attr_mapped:1;
		u8 mp_rebuilt:1;
		u8 attr_switched:1;
	} status = { 0, 0, 0, 0 };

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	BUG_ON(!*pages);
	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, pages[0]->index, nr_pages,
			(long long)pos, bytes);
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	u = 0;
	do {
		page = pages[u];
		BUG_ON(!page);
		/*
		 * create_empty_buffers() will create uptodate/dirty buffers if
		 * the page is uptodate/dirty.
		 */
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, blocksize, 0);
			if (unlikely(!page_has_buffers(page)))
				return -ENOMEM;
		}
	} while (++u < nr_pages);
	rl_write_locked = false;
	rl = NULL;
	err = 0;
	vcn = lcn = -1;
	vcn_len = 0;
	lcn_block = -1;
	was_hole = false;
	cpos = pos >> vol->cluster_size_bits;
	end = pos + bytes;
	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
	/*
	 * Loop over each page and for each page over each buffer. Use goto to
	 * reduce indentation.
	 */
	u = 0;
do_next_page:
	page = pages[u];
	bh_pos = (s64)page->index << PAGE_SHIFT;
	bh = head = page_buffers(page);
	do {
		VCN cdelta;
		s64 bh_end;
		unsigned bh_cofs;

		/* Clear buffer_new on all buffers to reinitialise state. */
		if (buffer_new(bh))
			clear_buffer_new(bh);
		bh_end = bh_pos + blocksize;
		bh_cpos = bh_pos >> vol->cluster_size_bits;
		bh_cofs = bh_pos & vol->cluster_size_mask;
		if (buffer_mapped(bh)) {
			/*
			 * The buffer is already mapped. If it is uptodate,
			 * ignore it.
			 */
			if (buffer_uptodate(bh))
				continue;
			/*
			 * The buffer is not uptodate. If the page is uptodate
			 * set the buffer uptodate and otherwise ignore it.
			 */
			if (PageUptodate(page)) {
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * Neither the page nor the buffer are uptodate. If
			 * the buffer is only partially being written to, we
			 * need to read it in before the write, i.e. now.
			 */
			if ((bh_pos < pos && bh_end > pos) ||
					(bh_pos < end && bh_end > end)) {
				/*
				 * If the buffer is fully or partially within
				 * the initialized size, do an actual read.
				 * Otherwise, simply zero the buffer.
				 */
				read_lock_irqsave(&ni->size_lock, flags);
				initialized_size = ni->initialized_size;
				read_unlock_irqrestore(&ni->size_lock, flags);
				if (bh_pos < initialized_size) {
					ntfs_submit_bh_for_read(bh);
					*wait_bh++ = bh;
				} else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			continue;
		}
		/* Unmapped buffer. Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;
		/*
		 * If the current buffer is in the same clusters as the map
		 * cache, there is no need to check the runlist again. The
		 * map cache is made up of @vcn, which is the first cached file
		 * cluster, @vcn_len which is the number of cached file
		 * clusters, @lcn is the device cluster corresponding to @vcn,
		 * and @lcn_block is the block number corresponding to @lcn.
		 */
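		/*
		 * Worked example (illustrative numbers only): with 4 KiB
		 * clusters (cluster_size_bits = 12) and 512-byte blocks
		 * (blocksize_bits = 9), a cached run of @vcn = 10,
		 * @vcn_len = 4, @lcn = 100 gives @lcn_block = 100 << 3 = 800.
		 * A buffer in file cluster 12 has cdelta = 2, so its device
		 * block is 800 + (2 << 3) + (bh_cofs >> 9).
		 */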
		cdelta = bh_cpos - vcn;
		if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
			BUG_ON(lcn < 0);
			bh->b_blocknr = lcn_block +
					(cdelta << (vol->cluster_size_bits -
					blocksize_bits)) +
					(bh_cofs >> blocksize_bits);
			set_buffer_mapped(bh);
			/*
			 * If the page is uptodate so is the buffer. If the
			 * buffer is fully outside the write, we ignore it if
			 * it was already allocated and we mark it dirty so it
			 * gets written out if we allocated it. On the other
			 * hand, if we allocated the buffer but we are not
			 * marking it dirty we set buffer_new so we can do
			 * error recovery.
			 */
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
				if (unlikely(was_hole)) {
					/* We allocated the buffer. */
					unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
					if (bh_end <= pos || bh_pos >= end)
						mark_buffer_dirty(bh);
					else
						set_buffer_new(bh);
				}
				continue;
			}
			/* Page is _not_ uptodate. */
			if (likely(!was_hole)) {
				/*
				 * Buffer was already allocated. If it is not
				 * uptodate and is only partially being written
				 * to, we need to read it in before the write,
				 * i.e. now.
				 */
				if (!buffer_uptodate(bh) && bh_pos < end &&
						bh_end > pos &&
						(bh_pos < pos ||
						bh_end > end)) {
					/*
					 * If the buffer is fully or partially
					 * within the initialized size, do an
					 * actual read. Otherwise, simply zero
					 * the buffer.
					 */
					read_lock_irqsave(&ni->size_lock,
							flags);
					initialized_size = ni->initialized_size;
					read_unlock_irqrestore(&ni->size_lock,
							flags);
					if (bh_pos < initialized_size) {
						ntfs_submit_bh_for_read(bh);
						*wait_bh++ = bh;
					} else {
						zero_user(page, bh_offset(bh),
								blocksize);
						set_buffer_uptodate(bh);
					}
				}
				continue;
			}
			/* We allocated the buffer. */
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
			/*
			 * If the buffer is fully outside the write, zero it,
			 * set it uptodate, and mark it dirty so it gets
			 * written out. If it is partially being written to,
			 * zero region surrounding the write but leave it to
			 * commit write to do anything else. Finally, if the
			 * buffer is fully being overwritten, do nothing.
			 */
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				mark_buffer_dirty(bh);
				continue;
			}
			set_buffer_new(bh);
			if (!buffer_uptodate(bh) &&
					(bh_pos < pos || bh_end > end)) {
				u8 *kaddr;
				unsigned pofs;

				kaddr = kmap_atomic(page);
				if (bh_pos < pos) {
					pofs = bh_pos & ~PAGE_MASK;
					memset(kaddr + pofs, 0, pos - bh_pos);
				}
				if (bh_end > end) {
					pofs = end & ~PAGE_MASK;
					memset(kaddr + pofs, 0, bh_end - end);
				}
				kunmap_atomic(kaddr);
				flush_dcache_page(page);
			}
			continue;
		}
		/*
		 * Slow path: this is the first buffer in the cluster. If it
		 * is outside allocated size and is not uptodate, zero it and
		 * set it uptodate.
		 */
		read_lock_irqsave(&ni->size_lock, flags);
		initialized_size = ni->allocated_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (bh_pos > initialized_size) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			} else if (!buffer_uptodate(bh)) {
				zero_user(page, bh_offset(bh), blocksize);
				set_buffer_uptodate(bh);
			}
			continue;
		}
		is_retry = false;
		if (!rl) {
			down_read(&ni->runlist.lock);
retry_remap:
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target cluster. */
			while (rl->length && rl[1].vcn <= bh_cpos)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
			if (likely(lcn >= 0)) {
				/*
				 * Successful remap, setup the map cache and
				 * use that to deal with the buffer.
				 */
				was_hole = false;
				vcn = bh_cpos;
				vcn_len = rl[1].vcn - vcn;
				lcn_block = lcn << (vol->cluster_size_bits -
						blocksize_bits);
				cdelta = 0;
				/*
				 * If the number of remaining clusters touched
				 * by the write is smaller or equal to the
				 * number of cached clusters, unlock the
				 * runlist as the map cache will be used from
				 * now on.
				 */
				if (likely(vcn + vcn_len >= cend)) {
					if (rl_write_locked) {
						up_write(&ni->runlist.lock);
						rl_write_locked = false;
					} else
						up_read(&ni->runlist.lock);
					rl = NULL;
				}
				goto map_buffer_cached;
			}
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/*
		 * If it is not a hole and not out of bounds, the runlist is
		 * probably unmapped so try to map it now.
		 */
		if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
			if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
				/* Attempt to map runlist. */
				if (!rl_write_locked) {
					/*
					 * We need the runlist locked for
					 * writing, so if it is locked for
					 * reading relock it now and retry in
					 * case it changed whilst we dropped
					 * the lock.
					 */
					up_read(&ni->runlist.lock);
					down_write(&ni->runlist.lock);
					rl_write_locked = true;
					goto retry_remap;
				}
				err = ntfs_map_runlist_nolock(ni, bh_cpos,
						NULL);
				if (likely(!err)) {
					is_retry = true;
					goto retry_remap;
				}
				/*
				 * If @vcn is out of bounds, pretend @lcn is
				 * LCN_ENOENT. As long as the buffer is out
				 * of bounds this will work fine.
				 */
				if (err == -ENOENT) {
					lcn = LCN_ENOENT;
					err = 0;
					goto rl_not_mapped_enoent;
				}
			} else
				err = -EIO;
			/* Failed to map the buffer, even after retrying. */
			bh->b_blocknr = -1;
			ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"vcn offset 0x%x, because its "
					"location on disk could not be "
					"determined%s (error code %i).",
					ni->mft_no, ni->type,
					(unsigned long long)bh_cpos,
					(unsigned)bh_pos &
					vol->cluster_size_mask,
					is_retry ? " even after retrying" : "",
					err);
			break;
		}
rl_not_mapped_enoent:
		/*
		 * The buffer is in a hole or out of bounds. We need to fill
		 * the hole, unless the buffer is in a cluster which is not
		 * touched by the write, in which case we just leave the buffer
		 * unmapped. This can only happen when the cluster size is
		 * less than the page cache size.
		 */
		if (unlikely(vol->cluster_size < PAGE_SIZE)) {
			bh_cend = (bh_end + vol->cluster_size - 1) >>
					vol->cluster_size_bits;
			if ((bh_cend <= cpos || bh_cpos >= cend)) {
				bh->b_blocknr = -1;
				/*
				 * If the buffer is uptodate we skip it. If it
				 * is not but the page is uptodate, we can set
				 * the buffer uptodate. If the page is not
				 * uptodate, we can clear the buffer and set it
				 * uptodate. Whether this is worthwhile is
				 * debatable and this could be removed.
				 */
				if (PageUptodate(page)) {
					if (!buffer_uptodate(bh))
						set_buffer_uptodate(bh);
				} else if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				continue;
			}
		}
		/*
		 * Out of bounds buffer is invalid if it was not really out of
		 * bounds.
		 */
		BUG_ON(lcn != LCN_HOLE);
		/*
		 * We need the runlist locked for writing, so if it is locked
		 * for reading relock it now and retry in case it changed
		 * whilst we dropped the lock.
		 */
		BUG_ON(!rl);
		if (!rl_write_locked) {
			up_read(&ni->runlist.lock);
			down_write(&ni->runlist.lock);
			rl_write_locked = true;
			goto retry_remap;
		}
		/* Find the previous last allocated cluster. */
		BUG_ON(rl->lcn != LCN_HOLE);
		lcn = -1;
		rl2 = rl;
		while (--rl2 >= ni->runlist.rl) {
			if (rl2->lcn >= 0) {
				lcn = rl2->lcn + rl2->length;
				break;
			}
		}
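		/*
		 * Illustration (hypothetical runlist): given the runs
		 * { vcn 0, lcn 50, len 4 } and { vcn 4, lcn LCN_HOLE, len 2 },
		 * filling the hole at vcn 5 scans backwards to the first run
		 * and passes lcn 50 + 4 = 54 to ntfs_cluster_alloc() below as
		 * the allocation hint, keeping the file contiguous on disk
		 * when possible.
		 */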
		rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
				false);
		if (IS_ERR(rl2)) {
			err = PTR_ERR(rl2);
			ntfs_debug("Failed to allocate cluster, error code %i.",
					err);
			break;
		}
		lcn = rl2->lcn;
		rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			if (err != -ENOMEM)
				err = -EIO;
			if (ntfs_cluster_free_from_rl(vol, rl2)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path. Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			ntfs_free(rl2);
			break;
		}
		ni->runlist.rl = rl;
		status.runlist_merged = 1;
		ntfs_debug("Allocated cluster, lcn 0x%llx.",
				(unsigned long long)lcn);
		/* Map and lock the mft record and get the attribute record. */
		if (!NInoAttr(ni))
			base_ni = ni;
		else
			base_ni = ni->ext.base_ntfs_ino;
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			break;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			unmap_mft_record(base_ni);
			break;
		}
		status.mft_attr_mapped = 1;
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			break;
		}
		m = ctx->mrec;
		a = ctx->attr;
		/*
		 * Find the runlist element with which the attribute extent
		 * starts. Note, we cannot use the _attr_ version because we
		 * have mapped the mft record. That is ok because we know the
		 * runlist fragment must be mapped already to have ever gotten
		 * here, so we can just use the _rl_ version.
		 */
		vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
		rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
		BUG_ON(!rl2);
		BUG_ON(!rl2->length);
		BUG_ON(rl2->lcn < LCN_HOLE);
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		/*
		 * If @highest_vcn is zero, calculate the real highest_vcn
		 * (which can really be zero).
		 */
		if (!highest_vcn)
			highest_vcn = (sle64_to_cpu(
					a->data.non_resident.allocated_size) >>
					vol->cluster_size_bits) - 1;
		/*
		 * Determine the size of the mapping pairs array for the new
		 * extent, i.e. the old extent with the hole filled.
		 */
		mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
				highest_vcn);
		if (unlikely(mp_size <= 0)) {
			if (!(err = mp_size))
				err = -EIO;
			ntfs_debug("Failed to get size for mapping pairs "
					"array, error code %i.", err);
			break;
		}
		/*
		 * Resize the attribute record to fit the new mapping pairs
		 * array.
		 */
		attr_rec_len = le32_to_cpu(a->length);
		err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset));
		if (unlikely(err)) {
			BUG_ON(err != -ENOSPC);
			// TODO: Deal with this by using the current attribute
			// and fill it with as much of the mapping pairs
			// array as possible. Then loop over each attribute
			// extent rewriting the mapping pairs arrays as we go
			// along and if when we reach the end we have not
			// enough space, try to resize the last attribute
			// extent and if even that fails, add a new attribute
			// extent.
			// We could also try to resize at each step in the hope
			// that we will not need to rewrite every single extent.
			// Note, we may need to decompress some extents to fill
			// the runlist as we are walking the extents...
			ntfs_error(vol->sb, "Not enough space in the mft "
					"record for the extended attribute "
					"record. This case is not "
					"implemented yet.");
			err = -EOPNOTSUPP;
			break;
		}
		status.mp_rebuilt = 1;
		/*
		 * Generate the mapping pairs array directly into the attribute
		 * record.
		 */
		err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset),
				mp_size, rl2, vcn, highest_vcn, NULL);
		if (unlikely(err)) {
			ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
					"attribute type 0x%x, because building "
					"the mapping pairs failed with error "
					"code %i.", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			err = -EIO;
			break;
		}
		/* Update the highest_vcn but only if it was not set. */
		if (unlikely(!a->data.non_resident.highest_vcn))
			a->data.non_resident.highest_vcn =
					cpu_to_sle64(highest_vcn);
		/*
		 * If the attribute is sparse/compressed, update the compressed
		 * size in the ntfs_inode structure and the attribute record.
		 */
		if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
			/*
			 * If we are not in the first attribute extent, switch
			 * to it, but first ensure the changes will make it to
			 * disk later.
			 */
			if (a->data.non_resident.lowest_vcn) {
				flush_dcache_mft_record_page(ctx->ntfs_ino);
				mark_mft_record_dirty(ctx->ntfs_ino);
				ntfs_attr_reinit_search_ctx(ctx);
				err = ntfs_attr_lookup(ni->type, ni->name,
						ni->name_len, CASE_SENSITIVE,
						0, NULL, 0, ctx);
				if (unlikely(err)) {
					status.attr_switched = 1;
					break;
				}
				/* @m is not used any more so do not set it. */
				a = ctx->attr;
			}
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			a->data.non_resident.compressed_size =
					cpu_to_sle64(ni->itype.compressed.size);
			write_unlock_irqrestore(&ni->size_lock, flags);
		}
		/* Ensure the changes make it to disk. */
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
		/* Successfully filled the hole. */
		status.runlist_merged = 0;
		status.mft_attr_mapped = 0;
		status.mp_rebuilt = 0;
		/* Setup the map cache and use that to deal with the buffer. */
		was_hole = true;
		vcn = bh_cpos;
		vcn_len = 1;
		lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
		cdelta = 0;
		/*
		 * If the number of remaining clusters in the @pages is smaller
		 * or equal to the number of cached clusters, unlock the
		 * runlist as the map cache will be used from now on.
		 */
		if (likely(vcn + vcn_len >= cend)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
			rl = NULL;
		}
		goto map_buffer_cached;
	} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
	/* If there are no errors, do the next page. */
	if (likely(!err && ++u < nr_pages))
		goto do_next_page;
	/* If there are no errors, release the runlist lock if we took it. */
	if (likely(!err)) {
		if (unlikely(rl_write_locked)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
		} else if (unlikely(rl))
			up_read(&ni->runlist.lock);
		rl = NULL;
	}
	/* If we issued read requests, let them complete. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	while (wait_bh > wait) {
		bh = *--wait_bh;
		wait_on_buffer(bh);
		if (likely(buffer_uptodate(bh))) {
			page = bh->b_page;
			bh_pos = ((s64)page->index << PAGE_SHIFT) +
					bh_offset(bh);
			/*
			 * If the buffer overflows the initialized size, need
			 * to zero the overflowing region.
			 */
			if (unlikely(bh_pos + blocksize > initialized_size)) {
				int ofs = 0;

				if (likely(bh_pos < initialized_size))
					ofs = initialized_size - bh_pos;
				zero_user_segment(page, bh_offset(bh) + ofs,
						blocksize);
			}
		} else /* if (unlikely(!buffer_uptodate(bh))) */
			err = -EIO;
	}
	if (likely(!err)) {
		/* Clear buffer_new on all buffers. */
		u = 0;
		do {
			bh = head = page_buffers(pages[u]);
			do {
				if (buffer_new(bh))
					clear_buffer_new(bh);
			} while ((bh = bh->b_this_page) != head);
		} while (++u < nr_pages);
		ntfs_debug("Done.");
		return err;
	}
	if (status.attr_switched) {
		/* Get back to the attribute extent we modified. */
		ntfs_attr_reinit_search_ctx(ctx);
		if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
			ntfs_error(vol->sb, "Failed to find required "
					"attribute extent of attribute in "
					"error code path. Run chkdsk to "
					"recover.");
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			write_unlock_irqrestore(&ni->size_lock, flags);
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
			/*
			 * The only thing that is now wrong is the compressed
			 * size of the base attribute extent which chkdsk
			 * should be able to fix.
			 */
			NVolSetErrors(vol);
		} else {
			m = ctx->mrec;
			a = ctx->attr;
			status.attr_switched = 0;
		}
	}
	/*
	 * If the runlist has been modified, need to restore it by punching a
	 * hole into it and we then need to deallocate the on-disk cluster as
	 * well. Note, we only modify the runlist if we are able to generate a
	 * new mapping pairs array, i.e. only when the mapped attribute extent
	 * is not switched.
	 */
	if (status.runlist_merged && !status.attr_switched) {
		BUG_ON(!rl_write_locked);
		/* Make the file cluster we allocated sparse in the runlist. */
		if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
			ntfs_error(vol->sb, "Failed to punch hole into "
					"attribute runlist in error code "
					"path. Run chkdsk to recover the "
					"lost cluster.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			status.runlist_merged = 0;
			/*
			 * Deallocate the on-disk cluster we allocated but only
			 * if we succeeded in punching its vcn out of the
			 * runlist.
			 */
			down_write(&vol->lcnbmp_lock);
			if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path. Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			up_write(&vol->lcnbmp_lock);
		}
	}
	/*
	 * Resize the attribute record to its old size and rebuild the mapping
	 * pairs array. Note, we only can do this if the runlist has been
	 * restored to its old state which also implies that the mapped
	 * attribute extent is not switched.
	 */
	if (status.mp_rebuilt && !status.runlist_merged) {
		if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
			ntfs_error(vol->sb, "Failed to restore attribute "
					"record in error code path. Run "
					"chkdsk to recover.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			if (ntfs_mapping_pairs_build(vol, (u8*)a +
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), attr_rec_len -
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), ni->runlist.rl,
					vcn, highest_vcn, NULL)) {
				ntfs_error(vol->sb, "Failed to restore "
						"mapping pairs array in error "
						"code path. Run chkdsk to "
						"recover.");
				NVolSetErrors(vol);
			}
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
		}
	}
	/* Release the mft record and the attribute. */
	if (status.mft_attr_mapped) {
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
	}
	/* Release the runlist lock. */
	if (rl_write_locked)
		up_write(&ni->runlist.lock);
	else if (rl)
		up_read(&ni->runlist.lock);
	/*
	 * Zero out any newly allocated blocks to avoid exposing stale data.
	 * If BH_New is set, we know that the block was newly allocated above
	 * and that it has not been fully zeroed and marked dirty yet.
	 */
	nr_pages = u;
	u = 0;
	end = bh_cpos << vol->cluster_size_bits;
	do {
		page = pages[u];
		bh = head = page_buffers(page);
		do {
			if (u == nr_pages &&
					((s64)page->index << PAGE_SHIFT) +
					bh_offset(bh) >= end)
				break;
			if (!buffer_new(bh))
				continue;
			clear_buffer_new(bh);
			if (!buffer_uptodate(bh)) {
				if (PageUptodate(page))
					set_buffer_uptodate(bh);
				else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			mark_buffer_dirty(bh);
		} while ((bh = bh->b_this_page) != head);
	} while (++u <= nr_pages);
	ntfs_error(vol->sb, "Failed. Returning error code %i.", err);
	return err;
}
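
/*
 * Sketch of how the helpers in this file fit together (assumed from the
 * function descriptions above; the buffered write loop itself, referred to
 * as ntfs_file_buffered_write() in those descriptions, is not part of this
 * excerpt):
 *
 *	err = __ntfs_grab_cache_pages(mapping, index, nr, pages, &cached_page);
 *	if (!err)
 *		err = ntfs_prepare_pages_for_non_resident_write(pages, nr,
 *				pos, bytes);
 *	if (!err) {
 *		... copy the user data into the pages, then:
 *		ntfs_flush_dcache_pages(pages, nr);
 *		err = ntfs_commit_pages_after_non_resident_write(pages, nr,
 *				pos, bytes);
 *	}
 */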

static inline void ntfs_flush_dcache_pages(struct page **pages,
		unsigned nr_pages)
{
	BUG_ON(!nr_pages);
	/*
	 * Warning: Do not do the decrement at the same time as the call to
	 * flush_dcache_page() because it is a NULL macro on i386 and hence the
	 * decrement never happens so the loop never terminates.
	 */
	do {
		--nr_pages;
		flush_dcache_page(pages[nr_pages]);
	} while (nr_pages > 0);
}
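
/*
 * For illustration (hedged; this restates the warning above): where
 * flush_dcache_page() is an empty macro, writing the loop body as
 *
 *	flush_dcache_page(pages[--nr_pages]);
 *
 * would expand to nothing at all, the decrement included, so nr_pages would
 * never reach zero and the loop would never terminate. Hence the decrement
 * on its own line.
 */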

/**
 * ntfs_commit_pages_after_non_resident_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * See description of ntfs_commit_pages_after_write(), below.
 */
static inline int ntfs_commit_pages_after_non_resident_write(
		struct page **pages, const unsigned nr_pages,
		s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct buffer_head *bh, *head;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	unsigned long flags;
	unsigned blocksize, u;
	int err;

	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	blocksize = vi->i_sb->s_blocksize;
	end = pos + bytes;
	u = 0;
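	/*
	 * Mark every buffer that intersects [pos, end) uptodate and dirty;
	 * whether the remaining buffers are already uptodate decides if the
	 * page as a whole can be marked uptodate.
	 */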
	do {
		s64 bh_pos;
		struct page *page;
		bool partial;

		page = pages[u];
		bh_pos = (s64)page->index << PAGE_SHIFT;
		bh = head = page_buffers(page);
		partial = false;
		do {
			s64 bh_end;

			bh_end = bh_pos + blocksize;
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh))
					partial = true;
			} else {
				set_buffer_uptodate(bh);
				mark_buffer_dirty(bh);
			}
		} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
		/*
		 * If all buffers are now uptodate but the page is not, set the
		 * page uptodate.
		 */
		if (!partial && !PageUptodate(page))
			SetPageUptodate(page);
	} while (++u < nr_pages);
	/*
	 * Finally, if we do not need to update initialized_size or i_size we
	 * are finished.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end <= initialized_size) {
		ntfs_debug("Done.");
		return 0;
	}
	/*
	 * Update initialized_size/i_size as appropriate, both in the inode and
	 * the mft record.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	BUG_ON(!NInoNonResident(ni));
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	write_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(end > ni->allocated_size);
	ni->initialized_size = end;
	a->data.non_resident.initialized_size = cpu_to_sle64(end);
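	/*
	 * If the write extended the file, data_size (which mirrors i_size)
	 * must grow along with the new initialized_size.
	 */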
	if (end > i_size_read(vi)) {
		i_size_write(vi, end);
		a->data.non_resident.data_size =
				a->data.non_resident.initialized_size;
	}
	write_unlock_irqrestore(&ni->size_lock, flags);
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
			"code %i).", err);
	if (err != -ENOMEM)
		NVolSetErrors(ni->vol);
	return err;
}

/**
 * ntfs_commit_pages_after_write - commit the received data
 * @pages: array of destination pages
 * @nr_pages: number of pages in @pages
 * @pos: byte position in file at which the write begins
 * @bytes: number of bytes to be written
 *
 * This is called from ntfs_file_buffered_write() with i_mutex held on the inode
 * (@pages[0]->mapping->host).  There are @nr_pages pages in @pages which are
 * locked but not kmap()ped.  The source data has already been copied into the
 * @pages.  ntfs_prepare_pages_for_non_resident_write() has been called before
 * the data was copied (for non-resident attributes only) and it returned
 * success.
 *
 * Need to set uptodate and mark dirty all buffers within the boundary of the
 * write.  If all buffers in a page are uptodate we set the page uptodate, too.
 *
 * Setting the buffers dirty ensures that they get written out later when
 * ntfs_writepage() is invoked by the VM.
 *
 * Finally, we need to update i_size and initialized_size as appropriate both
 * in the inode and the mft record.
 *
 * This is modelled after fs/buffer.c::generic_commit_write(), which marks
 * buffers uptodate and dirty, sets the page uptodate if all buffers in the
 * page are uptodate, and updates i_size if the end of io is beyond i_size.  In
 * that case, it also marks the inode dirty.
 *
 * If things have gone as outlined in
 * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
 * content modifications here for non-resident attributes.  For resident
 * attributes we need to do the uptodate bringing here which we combine with
 * the copying into the mft record which means we save one atomic kmap.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_commit_pages_after_write(struct page **pages,
		const unsigned nr_pages, s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct page *page;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	char *kattr, *kaddr;
	unsigned long flags;
	u32 attr_len;
	int err;

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	page = pages[0];
	BUG_ON(!page);
	vi = page->mapping->host;
	ni = NTFS_I(vi);
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, page->index, nr_pages,
			(long long)pos, bytes);
	if (NInoNonResident(ni))
		return ntfs_commit_pages_after_non_resident_write(pages,
				nr_pages, pos, bytes);
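	/* A resident attribute fits in the mft record, hence in one page. */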
	BUG_ON(nr_pages > 1);
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * sparse.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	BUG_ON(NInoNonResident(ni));
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	i_size = i_size_read(vi);
	BUG_ON(attr_len != i_size);
	BUG_ON(pos > attr_len);
	end = pos + bytes;
	BUG_ON(end > le32_to_cpu(a->length) -
			le16_to_cpu(a->data.resident.value_offset));
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	kaddr = kmap_atomic(page);
	/* Copy the received data from the page to the mft record. */
	memcpy(kattr + pos, kaddr + pos, bytes);
	/* Update the attribute length if necessary. */
	if (end > attr_len) {
		attr_len = end;
		a->data.resident.value_length = cpu_to_le32(attr_len);
	}
	/*
	 * If the page is not uptodate, bring the out of bounds area(s)
	 * uptodate by copying data from the mft record to the page.
	 */
	if (!PageUptodate(page)) {
		if (pos > 0)
			memcpy(kaddr, kattr, pos);
		if (end < attr_len)
			memcpy(kaddr + end, kattr + end, attr_len - end);
		/* Zero the region outside the end of the attribute value. */
		memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	kunmap_atomic(kaddr);
	/* Update initialized_size/i_size if necessary. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	BUG_ON(end > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
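	/* For a resident attribute, initialized_size always equals i_size. */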
	BUG_ON(initialized_size != i_size);
	if (end > initialized_size) {
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = end;
		i_size_write(vi, end);
		write_unlock_irqrestore(&ni->size_lock, flags);
	}
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory required to "
				"commit the write.");
		if (PageUptodate(page)) {
			ntfs_warning(vi->i_sb, "Page is uptodate, setting "
					"dirty so the write will be retried "
					"later on by the VM.");
			/*
			 * Put the page on mapping->dirty_pages, but leave its
			 * buffers' dirty state as-is.
			 */
			__set_page_dirty_nobuffers(page);
			err = 0;
		} else
			ntfs_error(vi->i_sb, "Page is not uptodate.  Written "
					"data has been lost.");
	} else {
		ntfs_error(vi->i_sb, "Resident attribute commit write failed "
				"with error %i.", err);
		NVolSetErrors(ni->vol);
	}
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}

/*
 * Copy as much as we can into the pages and return the number of bytes which
 * were successfully copied.  If a fault is encountered then clear the pages
 * out to (ofs + bytes) and return the number of bytes which were copied.
 */
static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
		unsigned ofs, struct iov_iter *i, size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	size_t total = 0;
	struct iov_iter data = *i;
	unsigned len, copied;
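
	/*
	 * Work on a private copy of the iterator so that a short copy does
	 * not advance the caller's @i; the caller advances @i itself by the
	 * number of bytes that were actually committed.
	 */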
	do {
		len = PAGE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
				len);
		total += copied;
		bytes -= copied;
		if (!bytes)
			break;
		iov_iter_advance(&data, copied);
		if (copied < len)
			goto err;
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err:
	/* Zero the rest of the target like __copy_from_user(). */
	len = PAGE_SIZE - copied;
	do {
		if (len > bytes)
			len = bytes;
		zero_user(*pages, copied, len);
		bytes -= len;
		copied = 0;
		len = PAGE_SIZE;
	} while (++pages < last_page);
	goto out;
}

/**
 * ntfs_perform_write - perform buffered write to a file
 * @file: file to write to
 * @i: iov_iter with data to write
 * @pos: byte offset in file at which to begin writing to
 */
static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
		loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *vi = mapping->host;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
	struct page *cached_page = NULL;
	VCN last_vcn;
	LCN lcn;
	size_t bytes;
	ssize_t status, written = 0;
	unsigned nr_pages;

	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
			"0x%llx, count 0x%lx.", vi->i_ino,
			(unsigned)le32_to_cpu(ni->type),
			(unsigned long long)pos,
			(unsigned long)iov_iter_count(i));
	/*
	 * If a previous ntfs_truncate() failed, repeat it and abort if it
	 * fails again.
	 */
	if (unlikely(NInoTruncateFailed(ni))) {
		int err;

		inode_dio_wait(vi);
		err = ntfs_truncate(vi);
		if (err || NInoTruncateFailed(ni)) {
			if (!err)
				err = -EIO;
			ntfs_error(vol->sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"ntfs_truncate() failed (error code "
					"%i).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			return err;
		}
	}
	/*
	 * Determine the number of pages per cluster for non-resident
	 * attributes.
	 */
	nr_pages = 1;
	if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni))
		nr_pages = vol->cluster_size >> PAGE_SHIFT;
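	/*
	 * For example, with 8kiB clusters and 4kiB pages, filling a hole must
	 * lock down 8192 >> 12 = 2 pages at a time.
	 */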
	last_vcn = -1;
	do {
		VCN vcn;
		pgoff_t idx, start_idx;
		unsigned ofs, do_pages, u;
		size_t copied;

		start_idx = idx = pos >> PAGE_SHIFT;
		ofs = pos & ~PAGE_MASK;
		bytes = PAGE_SIZE - ofs;
		do_pages = 1;
		if (nr_pages > 1) {
			vcn = pos >> vol->cluster_size_bits;
			if (vcn != last_vcn) {
				last_vcn = vcn;
				/*
				 * Get the lcn of the vcn the write is in.  If
				 * it is a hole, need to lock down all pages in
				 * the cluster.
				 */
				down_read(&ni->runlist.lock);
				lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
						vol->cluster_size_bits, false);
				up_read(&ni->runlist.lock);
				if (unlikely(lcn < LCN_HOLE)) {
					if (lcn == LCN_ENOMEM)
						status = -ENOMEM;
					else {
						status = -EIO;
						ntfs_error(vol->sb, "Cannot "
							"perform write to "
							"inode 0x%lx, "
							"attribute type 0x%x, "
							"because the attribute "
							"is corrupt.",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type));
					}
					break;
				}
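				/*
				 * Writing into a hole: holes are filled a
				 * whole cluster at a time, so widen the write
				 * window to the cluster containing @pos.
				 */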
				if (lcn == LCN_HOLE) {
					start_idx = (pos & ~(s64)
							vol->cluster_size_mask)
							>> PAGE_SHIFT;
					bytes = vol->cluster_size - (pos &
							vol->cluster_size_mask);
					do_pages = nr_pages;
				}
			}
		}
		if (bytes > iov_iter_count(i))
			bytes = iov_iter_count(i);
again:
		/*
		 * Bring in the user page(s) that we will copy from _first_.
		 * Otherwise there is a nasty deadlock on copying from the same
		 * page(s) as we are writing to, without it/them being marked
		 * up-to-date.  Note, at present there is nothing to stop the
		 * pages being swapped out between us bringing them into memory
		 * and doing the actual copying.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}
		/* Get and lock @do_pages starting at index @start_idx. */
		status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
				pages, &cached_page);
		if (unlikely(status))
			break;
		/*
		 * For non-resident attributes, we need to fill any holes with
		 * actual clusters and ensure all buffers are mapped.  We also
		 * need to bring uptodate any buffers that are only partially
		 * being written to.
		 */
		if (NInoNonResident(ni)) {
			status = ntfs_prepare_pages_for_non_resident_write(
					pages, do_pages, pos, bytes);
			if (unlikely(status)) {
				do {
					unlock_page(pages[--do_pages]);
					put_page(pages[do_pages]);
				} while (do_pages);
				break;
			}
		}
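		/*
		 * When the window was widened to a whole cluster, the page
		 * containing @pos need not be pages[0]; @u is its index
		 * within the locked pages.
		 */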
		u = (pos >> PAGE_SHIFT) - pages[0]->index;
		copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
				i, bytes);
		ntfs_flush_dcache_pages(pages + u, do_pages - u);
		status = 0;
		if (likely(copied == bytes)) {
			status = ntfs_commit_pages_after_write(pages, do_pages,
					pos, bytes);
			if (!status)
				status = bytes;
		}
		do {
			unlock_page(pages[--do_pages]);
			put_page(pages[do_pages]);
		} while (do_pages);
		if (unlikely(status < 0))
			break;
		copied = status;
		cond_resched();
		if (unlikely(!copied)) {
			size_t sc;

			/*
			 * We failed to copy anything.  Fall back to single
			 * segment length write.
			 *
			 * This is needed to avoid possible livelock in the
			 * case that all segments in the iov cannot be copied
			 * at once without a pagefault.
			 */
			sc = iov_iter_single_seg_count(i);
			if (bytes > sc)
				bytes = sc;
			goto again;
		}
		iov_iter_advance(i, copied);
		pos += copied;
		written += copied;
		balance_dirty_pages_ratelimited(mapping);
		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}
	} while (iov_iter_count(i));
	if (cached_page)
		put_page(cached_page);
	ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
			written ? "written" : "status", (unsigned long)written,
			(long)status);
	return written ? written : status;
}

/**
 * ntfs_file_write_iter - simple wrapper for ntfs_file_write_iter_nolock()
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Basically the same as generic_file_write_iter() except that it ends up
 * calling ntfs_perform_write() instead of generic_perform_write() and that
 * O_DIRECT is not implemented.
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *vi = file_inode(file);
	ssize_t written = 0;
	ssize_t err;

	inode_lock(vi);
	/* We can write back this queue in page reclaim. */
	current->backing_dev_info = inode_to_bdi(vi);
	err = ntfs_prepare_file_for_write(iocb, from);
	if (iov_iter_count(from) && !err)
		written = ntfs_perform_write(file, from, iocb->ki_pos);
	current->backing_dev_info = NULL;
	inode_unlock(vi);
	iocb->ki_pos += written;
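	/* Honour O_SYNC/O_DSYNC and friends for the range just written. */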
	if (likely(written > 0))
		written = generic_write_sync(iocb, written);
	return written ? written : err;
}

/**
 * ntfs_file_fsync - sync a file to disk
 * @filp: file to be synced
 * @start: offset in bytes of the start of the range to sync
 * @end: offset in bytes of the end of the range to sync (inclusive)
 * @datasync: if non-zero only flush user data and not metadata
 *
 * Data integrity sync of a file to disk.  Used for fsync, fdatasync, and msync
 * system calls.  This function is inspired by fs/buffer.c::file_fsync().
 *
 * If @datasync is false, write the mft record and all associated extent mft
 * records as well as the $DATA attribute and then sync the block device.
 *
 * If @datasync is true and the attribute is non-resident, we skip the writing
 * of the mft record and all associated extent mft records (this might still
 * happen due to the write_inode_now() call).
 *
 * Also, if @datasync is true, we do not wait on the inode to be written out
 * but we always wait on the page cache pages to be written out.
 *
 * Locking: Caller must hold i_mutex on the inode.
 *
 * TODO: We should probably also write all attribute/index inodes associated
 * with this inode but since we have no simple way of getting to them we ignore
 * this problem for now.
 */
static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct inode *vi = filp->f_mapping->host;
	int err, ret = 0;

	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);

	err = filemap_write_and_wait_range(vi->i_mapping, start, end);
	if (err)
		return err;
	inode_lock(vi);

	BUG_ON(S_ISDIR(vi->i_mode));
	if (!datasync || !NInoNonResident(NTFS_I(vi)))
		ret = __ntfs_write_inode(vi, 1);
	write_inode_now(vi, !datasync);
	/*
	 * NOTE: If we were to use mapping->private_list (see ext2 and
	 * fs/buffer.c) for dirty blocks then we could optimize the below to be
	 * sync_mapping_buffers(vi->i_mapping).
	 */
	err = sync_blockdev(vi->i_sb->s_bdev);
	if (unlikely(err && !ret))
		ret = err;
	if (likely(!ret))
		ntfs_debug("Done.");
	else
		ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx.  Error "
				"%u.", datasync ? "data" : "", vi->i_ino, -ret);
	inode_unlock(vi);
	return ret;
}

#endif /* NTFS_RW */

const struct file_operations ntfs_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
#ifdef NTFS_RW
	.write_iter	= ntfs_file_write_iter,
	.fsync		= ntfs_file_fsync,
#endif /* NTFS_RW */
	.mmap		= generic_file_mmap,
	.open		= ntfs_file_open,
	.splice_read	= generic_file_splice_read,
};

const struct inode_operations ntfs_file_inode_ops = {
#ifdef NTFS_RW
	.setattr	= ntfs_setattr,
#endif /* NTFS_RW */
};

const struct file_operations ntfs_empty_file_ops = {};

const struct inode_operations ntfs_empty_inode_ops = {};