/* fs/jfs/xattr.c */
  1. /*
  2. * Copyright (C) International Business Machines Corp., 2000-2004
  3. * Copyright (C) Christoph Hellwig, 2002
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
  13. * the GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/capability.h>
  20. #include <linux/fs.h>
  21. #include <linux/xattr.h>
  22. #include <linux/posix_acl_xattr.h>
  23. #include <linux/slab.h>
  24. #include <linux/quotaops.h>
  25. #include <linux/security.h>
  26. #include "jfs_incore.h"
  27. #include "jfs_superblock.h"
  28. #include "jfs_dmap.h"
  29. #include "jfs_debug.h"
  30. #include "jfs_dinode.h"
  31. #include "jfs_extent.h"
  32. #include "jfs_metapage.h"
  33. #include "jfs_xattr.h"
  34. #include "jfs_acl.h"
  35. /*
  36. * jfs_xattr.c: extended attribute service
  37. *
  38. * Overall design --
  39. *
  40. * Format:
  41. *
  42. * Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
  43. * value) and a variable (0 or more) number of extended attribute
  44. * entries. Each extended attribute entry (jfs_ea) is a <name,value> double
  45. * where <name> is constructed from a null-terminated ascii string
  46. * (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
  47. * (1 ... 65535 bytes). The in-memory format is
  48. *
  49. * 0 1 2 4 4 + namelen + 1
  50. * +-------+--------+--------+----------------+-------------------+
  51. * | Flags | Name | Value | Name String \0 | Data . . . . |
  52. * | | Length | Length | | |
  53. * +-------+--------+--------+----------------+-------------------+
  54. *
  55. * A jfs_ea_list then is structured as
  56. *
  57. * 0 4 4 + EA_SIZE(ea1)
  58. * +------------+-------------------+--------------------+-----
  59. * | Overall EA | First FEA Element | Second FEA Element | .....
  60. * | List Size | | |
  61. * +------------+-------------------+--------------------+-----
  62. *
  63. * On-disk:
  64. *
  65. * FEALISTs are stored on disk using blocks allocated by dbAlloc() and
  66. * written directly. An EA list may be in-lined in the inode if there is
  67. * sufficient room available.
  68. */
/*
 * Working state for one extended-attribute list.  The list may live in
 * three places: inlined in the inode, in an on-disk extent mapped through
 * a metapage, or in a kmalloc'd bounce buffer.  'flag' records which.
 */
struct ea_buffer {
	int flag;		/* Indicates what storage xattr points to */
	int max_size;		/* largest xattr that fits in current buffer */
	dxd_t new_ea;		/* dxd to replace ea when modifying xattr */
	struct metapage *mp;	/* metapage containing ea list */
	struct jfs_ea_list *xattr;	/* buffer containing ea list */
};

/*
 * ea_buffer.flag values
 */
#define EA_INLINE	0x0001	/* list lives in the inode's inline area */
#define EA_EXTENT	0x0002	/* list lives in an on-disk extent (mp) */
#define EA_NEW		0x0004	/* storage freshly allocated, not yet committed */
#define EA_MALLOC	0x0008	/* list lives in a kmalloc'd buffer */
  83. static int is_known_namespace(const char *name)
  84. {
  85. if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
  86. strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
  87. strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
  88. strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
  89. return false;
  90. return true;
  91. }
/*
 * These three routines are used to recognize on-disk extended attributes
 * that are in a recognized namespace.  If the attribute is not recognized,
 * "os2." is prepended to the name
 */
static int is_os2_xattr(struct jfs_ea *ea)
{
	/* Anything outside the known VFS namespaces is a legacy OS/2 attr */
	return !is_known_namespace(ea->name);
}
  101. static inline int name_size(struct jfs_ea *ea)
  102. {
  103. if (is_os2_xattr(ea))
  104. return ea->namelen + XATTR_OS2_PREFIX_LEN;
  105. else
  106. return ea->namelen;
  107. }
  108. static inline int copy_name(char *buffer, struct jfs_ea *ea)
  109. {
  110. int len = ea->namelen;
  111. if (is_os2_xattr(ea)) {
  112. memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
  113. buffer += XATTR_OS2_PREFIX_LEN;
  114. len += XATTR_OS2_PREFIX_LEN;
  115. }
  116. memcpy(buffer, ea->name, ea->namelen);
  117. buffer[ea->namelen] = 0;
  118. return len;
  119. }
  120. /* Forward references */
  121. static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);
/*
 * NAME: ea_write_inline
 *
 * FUNCTION: Attempt to write an EA inline if area is available
 *
 * PRE CONDITIONS:
 *	Already verified that the specified EA is small enough to fit inline
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in with necessary EA information
 *		  if we successfully copy the EA inline
 *
 * NOTES:
 *	Checks if the inode's inline area is available.  If so, copies EA inline
 *	and sets <ea> fields appropriately.  Otherwise, returns failure, EA will
 *	have to be put into an extent.
 *
 * RETURNS: 0 for successful copy to inline area; -EPERM if area not available
 */
static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
			   int size, dxd_t * ea)
{
	struct jfs_inode_info *ji = JFS_IP(ip);

	/*
	 * Make sure we have an EA -- the NULL EA list is valid, but you
	 * can't copy it!
	 */
	if (ealist && size > sizeof (struct jfs_ea_list)) {
		assert(size <= sizeof (ji->i_inline_ea));

		/*
		 * See if the space is available or if it is already being
		 * used for an inline EA.
		 */
		if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
			return -EPERM;

		/* Inline dxd carries only the size; no disk address/length */
		DXDsize(ea, size);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);
		memcpy(ji->i_inline_ea, ealist, size);
		ea->flag = DXD_INLINE;
		/* area now occupied; clear the "inline space free" hint */
		ji->mode2 &= ~INLINEEA;
	} else {
		/* Empty/NULL list: record "no EA" in the dxd */
		ea->flag = 0;
		DXDsize(ea, 0);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);

		/* Free up INLINE area */
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
	}

	return 0;
}
/*
 * NAME: ea_write
 *
 * FUNCTION: Write an EA for an inode
 *
 * PRE CONDITIONS: EA has been verified
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in appropriately with where the
 *		  EA was copied
 *
 * NOTES: Will write EA inline if able to, otherwise allocates blocks for an
 *	extent and synchronously writes it to those blocks.
 *
 * RETURNS: 0 for success; Anything else indicates failure
 */
static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
		    dxd_t * ea)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	int rc = 0, i;
	char *cp;
	s32 nbytes, nb;
	s32 bytes_to_write;
	struct metapage *mp;

	/*
	 * Quick check to see if this is an in-linable EA.  Short EAs
	 * and empty EAs are all in-linable, provided the space exists.
	 */
	if (!ealist || size <= sizeof (ji->i_inline_ea)) {
		if (!ea_write_inline(ip, ealist, size, ea))
			return 0;
		/* inline area unavailable -- fall through to extent path */
	}

	/* figure out how many blocks we need */
	nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;

	/* Allocate new blocks to quota. */
	rc = dquot_alloc_block(ip, nblocks);
	if (rc)
		return rc;

	rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
	if (rc) {
		/*Rollback quota allocation. */
		dquot_free_block(ip, nblocks);
		return rc;
	}

	/*
	 * Now have nblocks worth of storage to stuff into the FEALIST.
	 * loop over the FEALIST copying data into the buffer one page at
	 * a time.
	 */
	cp = (char *) ealist;
	nbytes = size;
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_write =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
			rc = -EIO;
			goto failed;
		}

		memcpy(mp->data, cp, nb);

		/*
		 * We really need a way to propagate errors for
		 * forced writes like this one.  --hch
		 *
		 * (__write_metapage => release_metapage => flush_metapage)
		 */
#ifdef _JFS_FIXME
		if ((rc = flush_metapage(mp))) {
			/*
			 * the write failed -- this means that the buffer
			 * is still assigned and the blocks are not being
			 * used.  this seems like the best error recovery
			 * we can get ...
			 */
			goto failed;
		}
#else
		flush_metapage(mp);
#endif

		cp += PSIZE;
		nbytes -= nb;
	}

	/* Record the new extent's location in the caller's dxd */
	ea->flag = DXD_EXTENT;
	DXDsize(ea, le32_to_cpu(ealist->size));
	DXDlength(ea, nblocks);
	DXDaddress(ea, blkno);

	/* Free up INLINE area */
	if (ji->ea.flag & DXD_INLINE)
		ji->mode2 |= INLINEEA;

	return 0;

      failed:
	/* Rollback quota allocation. */
	dquot_free_block(ip, nblocks);

	dbFree(ip, blkno, nblocks);
	return rc;
}
/*
 * NAME: ea_read_inline
 *
 * FUNCTION: Read an inlined EA into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * RETURNS: 0 on success; -EIO if the on-disk inline EA is inconsistent
 */
static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct jfs_inode_info *ji = JFS_IP(ip);
	int ea_size = sizeDXD(&ji->ea);

	if (ea_size == 0) {
		ealist->size = 0;
		return 0;
	}

	/* Sanity Check: recorded size must fit the inline area ... */
	if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
		return -EIO;
	/* ... and must agree with the size stored inside the list itself */
	if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
	    != ea_size)
		return -EIO;

	memcpy(ealist, ji->i_inline_ea, ea_size);
	return 0;
}
/*
 * NAME: ea_read
 *
 * FUNCTION: copy EA data into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * NOTES:  If EA is inline calls ea_read_inline() to copy EA.
 *
 * RETURNS: 0 for success; other indicates failure
 */
static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	char *cp = (char *) ealist;
	int i;
	int nbytes, nb;
	s32 bytes_to_read;
	struct metapage *mp;

	/* quick check for in-line EA */
	if (ji->ea.flag & DXD_INLINE)
		return ea_read_inline(ip, ealist);

	nbytes = sizeDXD(&ji->ea);
	if (!nbytes) {
		jfs_error(sb, "nbytes is 0\n");
		return -EIO;
	}

	/*
	 * Figure out how many blocks were allocated when this EA list was
	 * originally written to disk.
	 */
	nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
	blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;

	/*
	 * I have found the disk blocks which were originally used to store
	 * the FEALIST.  now i loop over each contiguous block copying the
	 * data into the buffer.
	 */
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_read =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
			return -EIO;

		memcpy(cp, mp->data, nb);
		release_metapage(mp);

		cp += PSIZE;
		nbytes -= nb;
	}

	return 0;
}
  376. /*
  377. * NAME: ea_get
  378. *
  379. * FUNCTION: Returns buffer containing existing extended attributes.
  380. * The size of the buffer will be the larger of the existing
  381. * attributes size, or min_size.
  382. *
  383. * The buffer, which may be inlined in the inode or in the
  384. * page cache must be release by calling ea_release or ea_put
  385. *
  386. * PARAMETERS:
  387. * inode - Inode pointer
  388. * ea_buf - Structure to be populated with ealist and its metadata
  389. * min_size- minimum size of buffer to be returned
  390. *
  391. * RETURNS: 0 for success; Other indicates failure
  392. */
  393. static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
  394. {
  395. struct jfs_inode_info *ji = JFS_IP(inode);
  396. struct super_block *sb = inode->i_sb;
  397. int size;
  398. int ea_size = sizeDXD(&ji->ea);
  399. int blocks_needed, current_blocks;
  400. s64 blkno;
  401. int rc;
  402. int quota_allocation = 0;
  403. /* When fsck.jfs clears a bad ea, it doesn't clear the size */
  404. if (ji->ea.flag == 0)
  405. ea_size = 0;
  406. if (ea_size == 0) {
  407. if (min_size == 0) {
  408. ea_buf->flag = 0;
  409. ea_buf->max_size = 0;
  410. ea_buf->xattr = NULL;
  411. return 0;
  412. }
  413. if ((min_size <= sizeof (ji->i_inline_ea)) &&
  414. (ji->mode2 & INLINEEA)) {
  415. ea_buf->flag = EA_INLINE | EA_NEW;
  416. ea_buf->max_size = sizeof (ji->i_inline_ea);
  417. ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
  418. DXDlength(&ea_buf->new_ea, 0);
  419. DXDaddress(&ea_buf->new_ea, 0);
  420. ea_buf->new_ea.flag = DXD_INLINE;
  421. DXDsize(&ea_buf->new_ea, min_size);
  422. return 0;
  423. }
  424. current_blocks = 0;
  425. } else if (ji->ea.flag & DXD_INLINE) {
  426. if (min_size <= sizeof (ji->i_inline_ea)) {
  427. ea_buf->flag = EA_INLINE;
  428. ea_buf->max_size = sizeof (ji->i_inline_ea);
  429. ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
  430. goto size_check;
  431. }
  432. current_blocks = 0;
  433. } else {
  434. if (!(ji->ea.flag & DXD_EXTENT)) {
  435. jfs_error(sb, "invalid ea.flag\n");
  436. return -EIO;
  437. }
  438. current_blocks = (ea_size + sb->s_blocksize - 1) >>
  439. sb->s_blocksize_bits;
  440. }
  441. size = max(min_size, ea_size);
  442. if (size > PSIZE) {
  443. /*
  444. * To keep the rest of the code simple. Allocate a
  445. * contiguous buffer to work with
  446. */
  447. ea_buf->xattr = kmalloc(size, GFP_KERNEL);
  448. if (ea_buf->xattr == NULL)
  449. return -ENOMEM;
  450. ea_buf->flag = EA_MALLOC;
  451. ea_buf->max_size = (size + sb->s_blocksize - 1) &
  452. ~(sb->s_blocksize - 1);
  453. if (ea_size == 0)
  454. return 0;
  455. if ((rc = ea_read(inode, ea_buf->xattr))) {
  456. kfree(ea_buf->xattr);
  457. ea_buf->xattr = NULL;
  458. return rc;
  459. }
  460. goto size_check;
  461. }
  462. blocks_needed = (min_size + sb->s_blocksize - 1) >>
  463. sb->s_blocksize_bits;
  464. if (blocks_needed > current_blocks) {
  465. /* Allocate new blocks to quota. */
  466. rc = dquot_alloc_block(inode, blocks_needed);
  467. if (rc)
  468. return -EDQUOT;
  469. quota_allocation = blocks_needed;
  470. rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
  471. &blkno);
  472. if (rc)
  473. goto clean_up;
  474. DXDlength(&ea_buf->new_ea, blocks_needed);
  475. DXDaddress(&ea_buf->new_ea, blkno);
  476. ea_buf->new_ea.flag = DXD_EXTENT;
  477. DXDsize(&ea_buf->new_ea, min_size);
  478. ea_buf->flag = EA_EXTENT | EA_NEW;
  479. ea_buf->mp = get_metapage(inode, blkno,
  480. blocks_needed << sb->s_blocksize_bits,
  481. 1);
  482. if (ea_buf->mp == NULL) {
  483. dbFree(inode, blkno, (s64) blocks_needed);
  484. rc = -EIO;
  485. goto clean_up;
  486. }
  487. ea_buf->xattr = ea_buf->mp->data;
  488. ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
  489. ~(sb->s_blocksize - 1);
  490. if (ea_size == 0)
  491. return 0;
  492. if ((rc = ea_read(inode, ea_buf->xattr))) {
  493. discard_metapage(ea_buf->mp);
  494. dbFree(inode, blkno, (s64) blocks_needed);
  495. goto clean_up;
  496. }
  497. goto size_check;
  498. }
  499. ea_buf->flag = EA_EXTENT;
  500. ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
  501. lengthDXD(&ji->ea) << sb->s_blocksize_bits,
  502. 1);
  503. if (ea_buf->mp == NULL) {
  504. rc = -EIO;
  505. goto clean_up;
  506. }
  507. ea_buf->xattr = ea_buf->mp->data;
  508. ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
  509. ~(sb->s_blocksize - 1);
  510. size_check:
  511. if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
  512. printk(KERN_ERR "ea_get: invalid extended attribute\n");
  513. print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
  514. ea_buf->xattr, ea_size, 1);
  515. ea_release(inode, ea_buf);
  516. rc = -EIO;
  517. goto clean_up;
  518. }
  519. return ea_size;
  520. clean_up:
  521. /* Rollback quota allocation */
  522. if (quota_allocation)
  523. dquot_free_block(inode, quota_allocation);
  524. return (rc);
  525. }
  526. static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
  527. {
  528. if (ea_buf->flag & EA_MALLOC)
  529. kfree(ea_buf->xattr);
  530. else if (ea_buf->flag & EA_EXTENT) {
  531. assert(ea_buf->mp);
  532. release_metapage(ea_buf->mp);
  533. if (ea_buf->flag & EA_NEW)
  534. dbFree(inode, addressDXD(&ea_buf->new_ea),
  535. lengthDXD(&ea_buf->new_ea));
  536. }
  537. }
/*
 * Commit the (possibly modified) EA list in 'ea_buf' to the inode as part
 * of transaction 'tid', writing it to its final location and swapping the
 * inode's ea descriptor.  A new_size of 0 removes the EA entirely.
 * Returns 0 on success or a negative errno from ea_write().
 */
static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
		  int new_size)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	unsigned long old_blocks, new_blocks;
	int rc = 0;

	if (new_size == 0) {
		/* Empty list: drop the buffer; NULL ea_buf signals removal */
		ea_release(inode, ea_buf);
		ea_buf = NULL;
	} else if (ea_buf->flag & EA_INLINE) {
		assert(new_size <= sizeof (ji->i_inline_ea));
		ji->mode2 &= ~INLINEEA;
		ea_buf->new_ea.flag = DXD_INLINE;
		DXDsize(&ea_buf->new_ea, new_size);
		DXDaddress(&ea_buf->new_ea, 0);
		DXDlength(&ea_buf->new_ea, 0);
	} else if (ea_buf->flag & EA_MALLOC) {
		/* Bounce buffer: write it out (inline or extent), then free */
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		kfree(ea_buf->xattr);
	} else if (ea_buf->flag & EA_NEW) {
		/* We have already allocated a new dxd */
		flush_metapage(ea_buf->mp);
	} else {
		/* ->xattr must point to original ea's metapage */
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		discard_metapage(ea_buf->mp);
	}
	if (rc)
		return rc;

	old_blocks = new_blocks = 0;

	if (ji->ea.flag & DXD_EXTENT) {
		invalidate_dxd_metapages(inode, ji->ea);
		old_blocks = lengthDXD(&ji->ea);
	}

	if (ea_buf) {
		/* Log old->new dxd transition and install the new one */
		txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
		if (ea_buf->new_ea.flag & DXD_EXTENT) {
			/* NOTE(review): new_blocks is computed but never
			 * consumed below -- appears vestigial; confirm. */
			new_blocks = lengthDXD(&ea_buf->new_ea);
			if (ji->ea.flag & DXD_INLINE)
				ji->mode2 |= INLINEEA;
		}
		ji->ea = ea_buf->new_ea;
	} else {
		/* EA removed entirely */
		txEA(tid, inode, &ji->ea, NULL);
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
		ji->ea.flag = 0;
		ji->ea.size = 0;
	}

	/* If old blocks exist, they must be removed from quota allocation. */
	if (old_blocks)
		dquot_free_block(inode, old_blocks);

	inode->i_ctime = CURRENT_TIME;

	return 0;
}
  593. /*
  594. * Most of the permission checking is done by xattr_permission in the vfs.
  595. * We also need to verify that this is a namespace that we recognize.
  596. */
  597. static int can_set_xattr(struct inode *inode, const char *name,
  598. const void *value, size_t value_len)
  599. {
  600. if (!strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)) {
  601. /*
  602. * This makes sure that we aren't trying to set an
  603. * attribute in a different namespace by prefixing it
  604. * with "os2."
  605. */
  606. if (is_known_namespace(name + XATTR_OS2_PREFIX_LEN))
  607. return -EOPNOTSUPP;
  608. return 0;
  609. }
  610. /*
  611. * Don't allow setting an attribute in an unknown namespace.
  612. */
  613. if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
  614. strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
  615. strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
  616. return -EOPNOTSUPP;
  617. return 0;
  618. }
/*
 * Set (create, replace or, with value == NULL, remove) one extended
 * attribute on 'inode' within transaction 'tid'.  The existing EA list is
 * read, the named entry spliced out, the new entry appended, and the
 * merged list committed via ea_put().  Honors XATTR_CREATE/XATTR_REPLACE.
 * Returns 0 on success or a negative errno.
 */
int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name,
		   const void *value, size_t value_len, int flags)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
	struct ea_buffer ea_buf;
	int old_ea_size = 0;
	int xattr_size;
	int new_size;
	int namelen = strlen(name);
	char *os2name = NULL;
	int found = 0;
	int rc;
	int length;

	/* "os2."-prefixed names are stored on disk without the prefix */
	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
				  GFP_KERNEL);
		if (!os2name)
			return -ENOMEM;
		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
		name = os2name;
		namelen -= XATTR_OS2_PREFIX_LEN;
	}

	down_write(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		rc = xattr_size;
		goto out;
	}

      again:
	ealist = (struct jfs_ea_list *) ea_buf.xattr;
	new_size = sizeof (struct jfs_ea_list);

	if (xattr_size) {
		/*
		 * Walk the list: note the entry being replaced (if any) and
		 * total the size of all entries that will be kept.
		 */
		for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
		     ea = NEXT_EA(ea)) {
			if ((namelen == ea->namelen) &&
			    (memcmp(name, ea->name, namelen) == 0)) {
				found = 1;
				if (flags & XATTR_CREATE) {
					rc = -EEXIST;
					goto release;
				}
				old_ea = ea;
				old_ea_size = EA_SIZE(ea);
				next_ea = NEXT_EA(ea);
			} else
				new_size += EA_SIZE(ea);
		}
	}

	if (!found) {
		if (flags & XATTR_REPLACE) {
			rc = -ENODATA;
			goto release;
		}
		/* Removing a nonexistent attribute is a successful no-op */
		if (value == NULL) {
			rc = 0;
			goto release;
		}
	}
	if (value)
		new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;

	if (new_size > ea_buf.max_size) {
		/*
		 * We need to allocate more space for merged ea list.
		 * We should only have loop to again: once.
		 */
		ea_release(inode, &ea_buf);
		xattr_size = ea_get(inode, &ea_buf, new_size);
		if (xattr_size < 0) {
			rc = xattr_size;
			goto out;
		}
		goto again;
	}

	/* Remove old ea of the same name */
	if (found) {
		/* number of bytes following target EA */
		length = (char *) END_EALIST(ealist) - (char *) next_ea;
		if (length > 0)
			memmove(old_ea, next_ea, length);
		xattr_size -= old_ea_size;
	}

	/* Add new entry to the end */
	if (value) {
		if (xattr_size == 0)
			/* Completely new ea list */
			xattr_size = sizeof (struct jfs_ea_list);

		/*
		 * The size of EA value is limited by on-disk format up to
		 * __le16, there would be an overflow if the size is equal
		 * to XATTR_SIZE_MAX (65536).  In order to avoid this issue,
		 * we can pre-check the value size against USHRT_MAX, and
		 * return -E2BIG in this case, which is consistent with the
		 * VFS setxattr interface.
		 */
		if (value_len >= USHRT_MAX) {
			rc = -E2BIG;
			goto release;
		}

		ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
		ea->flag = 0;
		ea->namelen = namelen;
		ea->valuelen = (cpu_to_le16(value_len));
		memcpy(ea->name, name, namelen);
		ea->name[namelen] = 0;
		if (value_len)
			memcpy(&ea->name[namelen + 1], value, value_len);
		xattr_size += EA_SIZE(ea);
	}

	/* DEBUG - If we did this right, these number match */
	if (xattr_size != new_size) {
		printk(KERN_ERR
		       "__jfs_setxattr: xattr_size = %d, new_size = %d\n",
		       xattr_size, new_size);
		rc = -EINVAL;
		goto release;
	}

	/*
	 * If we're left with an empty list, there's no ea
	 */
	if (new_size == sizeof (struct jfs_ea_list))
		new_size = 0;

	ealist->size = cpu_to_le32(new_size);

	rc = ea_put(tid, inode, &ea_buf, new_size);

	goto out;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_write(&JFS_IP(inode)->xattr_sem);

	kfree(os2name);

	return rc;
}
/*
 * VFS ->setxattr entry point: validate the namespace, then run
 * __jfs_setxattr inside its own transaction under the commit mutex.
 */
int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
		 size_t value_len, int flags)
{
	struct inode *inode = d_inode(dentry);
	struct jfs_inode_info *ji = JFS_IP(inode);
	int rc;
	tid_t tid;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, value_len, flags);

	if ((rc = can_set_xattr(inode, name, value, value_len)))
		return rc;

	if (value == NULL) {	/* empty EA, do not remove */
		value = "";
		value_len = 0;
	}

	tid = txBegin(inode->i_sb, 0);
	mutex_lock(&ji->commit_mutex);
	rc = __jfs_setxattr(tid, d_inode(dentry), name, value, value_len,
			    flags);
	if (!rc)
		rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	mutex_unlock(&ji->commit_mutex);

	return rc;
}
/*
 * Look up attribute 'name' on 'inode' and copy its value into 'data'.
 * With data == NULL, only the value's size is returned (standard
 * getxattr probing convention).  Returns the value size, -ENODATA when
 * absent, or -ERANGE when buf_size is too small.
 */
ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
		       size_t buf_size)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;
	int xattr_size;
	ssize_t size;
	int namelen = strlen(name);
	char *value;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);

	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto not_found;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* Find the named attribute */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
		if ((namelen == ea->namelen) &&
		    memcmp(name, ea->name, namelen) == 0) {
			/* Found it */
			size = le16_to_cpu(ea->valuelen);
			if (!data)
				goto release;
			else if (size > buf_size) {
				size = -ERANGE;
				goto release;
			}
			/* value bytes follow name and its NUL terminator */
			value = ((char *) &ea->name) + ea->namelen + 1;
			memcpy(data, value, size);
			goto release;
		}
      not_found:
	size = -ENODATA;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);

	return size;
}
  824. ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
  825. size_t buf_size)
  826. {
  827. int err;
  828. /*
  829. * If this is a request for a synthetic attribute in the system.*
  830. * namespace use the generic infrastructure to resolve a handler
  831. * for it via sb->s_xattr.
  832. */
  833. if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
  834. return generic_getxattr(dentry, name, data, buf_size);
  835. if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
  836. /*
  837. * skip past "os2." prefix
  838. */
  839. name += XATTR_OS2_PREFIX_LEN;
  840. /*
  841. * Don't allow retrieving properly prefixed attributes
  842. * by prepending them with "os2."
  843. */
  844. if (is_known_namespace(name))
  845. return -EOPNOTSUPP;
  846. }
  847. err = __jfs_getxattr(d_inode(dentry), name, data, buf_size);
  848. return err;
  849. }
/*
 * No special permissions are needed to list attributes except for trusted.*
 */
static inline int can_list(struct jfs_ea *ea)
{
	/* trusted.* entries are visible only to CAP_SYS_ADMIN */
	return (strncmp(ea->name, XATTR_TRUSTED_PREFIX,
			XATTR_TRUSTED_PREFIX_LEN) ||
		capable(CAP_SYS_ADMIN));
}
/*
 * VFS ->listxattr entry point: return the concatenated, NUL-separated
 * names of all listable attributes.  With data == NULL only the required
 * buffer size is returned; -ERANGE when buf_size is too small.
 */
ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
{
	struct inode *inode = d_inode(dentry);
	char *buffer;
	ssize_t size = 0;
	int xattr_size;
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto release;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* compute required size of list */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea))
			size += name_size(ea) + 1;	/* +1 for the NUL */
	}

	if (!data)
		goto release;

	if (size > buf_size) {
		size = -ERANGE;
		goto release;
	}

	/* Copy attribute names to buffer */
	buffer = data;
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea)) {
			int namelen = copy_name(buffer, ea);
			buffer += namelen + 1;
		}
	}

      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);
	return size;
}
/*
 * VFS ->removexattr entry point: removal is implemented as a setxattr of
 * a NULL value with XATTR_REPLACE, inside its own transaction.
 */
int jfs_removexattr(struct dentry *dentry, const char *name)
{
	struct inode *inode = d_inode(dentry);
	struct jfs_inode_info *ji = JFS_IP(inode);
	int rc;
	tid_t tid;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	if ((rc = can_set_xattr(inode, name, NULL, 0)))
		return rc;

	tid = txBegin(inode->i_sb, 0);
	mutex_lock(&ji->commit_mutex);
	rc = __jfs_setxattr(tid, d_inode(dentry), name, NULL, 0, XATTR_REPLACE);
	if (!rc)
		rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	mutex_unlock(&ji->commit_mutex);

	return rc;
}
/*
 * List of handlers for synthetic system.* attributes.  All real ondisk
 * attributes are handled directly.
 */
const struct xattr_handler *jfs_xattr_handlers[] = {
#ifdef CONFIG_JFS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	NULL,	/* sentinel */
};
#ifdef CONFIG_JFS_SECURITY
/*
 * Callback for security_inode_init_security(): store each LSM-supplied
 * attribute under the security.* namespace, within the transaction whose
 * tid is passed through fs_info.  Returns 0 or the first setxattr error.
 */
static int jfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
			  void *fs_info)
{
	const struct xattr *xattr;
	tid_t *tid = fs_info;
	char *name;
	int err = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		/* build "security." + xattr->name */
		name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
			       strlen(xattr->name) + 1, GFP_NOFS);
		if (!name) {
			err = -ENOMEM;
			break;
		}
		strcpy(name, XATTR_SECURITY_PREFIX);
		strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);

		err = __jfs_setxattr(*tid, inode, name,
				     xattr->value, xattr->value_len, 0);
		kfree(name);
		if (err < 0)
			break;
	}
	return err;
}
/*
 * Ask the security modules for initial xattrs on a newly created inode
 * and store them via jfs_initxattrs() under transaction 'tid'.
 */
int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
		      const struct qstr *qstr)
{
	return security_inode_init_security(inode, dir, qstr,
					    &jfs_initxattrs, &tid);
}
#endif