// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}
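/*
 * Example (illustrative only): opening with O_WRONLY|O_CREAT|O_TRUNC
 * produces CEPH_O_WRONLY|CEPH_O_CREAT|CEPH_O_TRUNC on the wire.  Flags
 * with no CEPH_* counterpart (e.g. O_NOATIME) have no ceph_sys2wire()
 * entry above, so they are dropped and show up only in the "unused
 * open flags" debug output.
 */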
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
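/*
 * Roughly, the three modes above map onto the entry points below:
 * buffered I/O goes through generic_file_read_iter() and
 * generic_perform_write() (see ceph_read_iter()/ceph_write_iter()),
 * the synchronous path is ceph_sync_read()/ceph_sync_write(), and
 * O_DIRECT is handled by ceph_direct_read_write().
 */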
/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}
/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}
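/*
 * Typical usage (sketch; this mirrors what ceph_direct_read_write()
 * does below): pin the caller's buffer, attach the bio_vec array to an
 * OSD request, then drop the page references:
 *
 *	struct bio_vec *bvecs;
 *	int num_bvecs;
 *	ssize_t len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_bvecs);
 *	...
 *	put_bvecs(bvecs, num_bvecs, should_dirty);
 *
 * Note that *num_bvecs is the allocated array size (npages), not the
 * number of populated entries, which is why the array is zeroed and
 * put_bvecs() skips NULL bv_page slots.
 */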
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}
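/*
 * For example, an O_RDONLY open may be routed to any MDS, while
 * O_WRONLY/O_RDWR/O_CREAT/O_TRUNC opens are directed to the auth MDS,
 * which manages the authoritative state for the inode being mutated.
 */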
static int ceph_init_file_info(struct inode *inode, struct file *file,
					int fmode, bool isdir)
{
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
			inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = fi;
	}

	fi->fmode = fmode;
	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);

	return 0;
}
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
		/* fall through */
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
					  S_ISDIR(inode->i_mode));
		if (ret)
			return ret;
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
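/*
 * Illustration: a client already holding read caps from any MDS can
 * open O_RDONLY with no round trip, and one holding caps from the auth
 * MDS can do the same for O_RDWR.  Only when the wanted set grows
 * beyond both what is issued and what the MDS already knows we want
 * does ceph_check_caps() notify the MDS, and that happens
 * asynchronously.
 */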
/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE &&
		    req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_put_fmode(ci, fi->fmode);
		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};
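/*
 * These drive the retry logic in ceph_read_iter(): CHECK_EOF means a
 * sync/direct read came back short and may need to be retried against
 * the updated i_size; READ_INLINE means the file data lives inline in
 * the MDS and must be fetched via getattr; HAVE_RETRIED marks a read
 * pass that has already been restarted.
 */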
/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;

		/* hit stripe and need to continue */
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
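/*
 * Worked example (assuming the default file layout: 4 MB objects,
 * stripe_count 1): a 6 MB read at offset 3 MB spans three objects, so
 * the first ceph_osdc_readpages() call is capped at 1 MB (this_len <
 * len, i.e. hit_stripe) and we loop back to 'more' twice, fetching
 * 4 MB from the second object and 1 MB from the third.
 */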
/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
						off + len);
	if (ret < 0)
		return ret;

	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0);
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};
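/*
 * One ceph_aio_request tracks all of the OSD sub-requests issued for a
 * single direct-I/O kiocb.  pending_reqs counts sub-requests still in
 * flight; the completion that drops it to zero (ceph_aio_complete())
 * reports the final result via ki_complete().  The first error seen is
 * latched into ->error with cmpxchg() and wins over the byte count.
 */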
static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;
		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;

		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
	     inode, rc, osd_data->bvec_pos.iter.bi_size);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
			struct iov_iter i;
			int zlen = osd_data->bvec_pos.iter.bi_size - rc;

			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF.  Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs,
				      osd_data->bvec_pos.iter.bi_size);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}
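/*
 * Note on the retry path above: -EOLDSNAPC means the write raced with
 * a snapshot and was sent with a stale snap context.  Since the OSD
 * completion callback is no place to block on allocation, the resend
 * is punted to the wb_wq workqueue, where ceph_aio_retry_work()
 * rebuilds the request with the most recent snap context and
 * resubmits it.
 */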
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range.  this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
					(pos+len) | (PAGE_SIZE - 1));

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages,
					      len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}

	return ret;
}
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
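/*
 * Contrast with ceph_direct_read_write(): the synchronous path above
 * copies user data into freshly allocated kernel pages
 * (ceph_alloc_page_vector() + copy_page_from_iter()) and waits for
 * each OSD write in turn, whereas the direct path pins the user pages
 * themselves via iter_get_bvecs_alloc() and may complete
 * asynchronously.
 */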
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}
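/*
 * Sketch of the retry flow above: a short sync/direct read with more
 * file left past ki_pos sets retry_op = CHECK_EOF and loops to 'again'
 * (once retry_op is set, a direct read falls back to ceph_sync_read());
 * inline files set retry_op = READ_INLINE and fetch the inline blob
 * with __ceph_do_getattr(), copying out of the returned page and
 * zero-filling the tail up to i_size.
 */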
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	u32 map_flags;
	u64 pool_flags;
	loff_t pos;
	loff_t limit = max(i_size_read(inode), fsc->max_file_size);

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

retry_snap:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	if (unlikely(pos >= limit)) {
		err = -EFBIG;
		goto out;
	} else {
		iov_iter_truncate(from, limit - pos);
	}

	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	down_read(&osdc->lock);
	map_flags = osdc->osdmap->flags;
	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
	up_read(&osdc->lock);
	if ((map_flags & CEPH_OSDMAP_FULL) ||
	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex, because the
		 * MDS revokes Fwb caps before sending a truncate message
		 * to us.  We can't get Fwb caps while there is a pending
		 * vmtruncate, so write and vmtruncate cannot run at the
		 * same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;
out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));

out:
	inode_unlock(inode);
	return ret;
}
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
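/*
 * Example (assuming 4 KB pages): punching 10 KB at offset 1 KB zeroes
 * bytes 1K-4K within the first page, truncates the fully covered page
 * range 4K-8K out of the page cache, and zeroes bytes 8K-11K in the
 * last page.
 */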
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
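/*
 * Example layout math (assuming the default layout: object_size 4 MB,
 * stripe_count 1, stripe_unit 4 MB): object_set_size is 4 MB, so each
 * whole-period iteration truncates/deletes one object.  With
 * stripe_count 4 a full period covers 16 MB and drops four objects,
 * one per stripe_unit step.
 */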
static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);

	/* Are we punching a hole beyond EOF? */
	if (offset >= size)
		goto unlock;
	if ((offset + length) > size)
		length = size - offset;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	ceph_zero_pagecache_range(inode, offset, length);
	ret = ceph_zero_objects(inode, offset, length);

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};