// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/xattr.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include <linux/security.h>
#include "xattr.h"
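
/*
 * An xattr_iter is a cursor over on-disk xattr metadata: 'blkaddr' is the
 * current metadata block, 'ofs' the byte offset within it, and 'kaddr'
 * the kernel mapping of the backing page.
 */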
struct xattr_iter {
	struct super_block *sb;
	struct page *page;
	void *kaddr;

	erofs_blk_t blkaddr;
	unsigned ofs;
};
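
/*
 * Unmap, unlock and release the page currently held by the iterator;
 * 'atomic' must match how the page was mapped (kmap_atomic vs. kmap).
 */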
static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
{
	/* the only user of kunmap() is 'init_inode_xattrs' */
	if (unlikely(!atomic))
		kunmap(it->page);
	else
		kunmap_atomic(it->kaddr);

	unlock_page(it->page);
	put_page(it->page);
}
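
/* Finish iteration, unless no page is held (it->page == NULL). */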
static inline void xattr_iter_end_final(struct xattr_iter *it)
{
	if (!it->page)
		return;

	xattr_iter_end(it, true);
}
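
/*
 * Parse the xattr ibody header of an inode on first use and cache its
 * shared xattr id array in the in-memory vnode; concurrent initializers
 * are serialized by the EROFS_V_BL_XATTR_BIT bit lock.
 */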
static int init_inode_xattrs(struct inode *inode)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct xattr_iter it;
	unsigned i;
	struct erofs_xattr_ibody_header *ih;
	struct erofs_sb_info *sbi;
	bool atomic_map;
	int ret = 0;

	/* in most cases, the xattrs of this inode have already been initialized */
	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
		return 0;

	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* has someone already initialized xattrs for us? */
	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
		goto out_unlock;

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header); in detail:
	 * 1) if it is too small to contain erofs_xattr_ibody_header,
	 *    ->xattr_isize must be 0 (which means no xattrs at all);
	 * 2) if it exactly fits erofs_xattr_ibody_header, the on-disk
	 *    layout is currently undefined (it may be used later together
	 *    with some new sb feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		errln("xattr_isize %d of nid %llu is not supported yet",
		      vi->xattr_isize, vi->nid);
		ret = -ENOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (unlikely(vi->xattr_isize)) {
			DBG_BUGON(1);
			ret = -EIO;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		ret = -ENOATTR;
		goto out_unlock;
	}

	sbi = EROFS_I_SB(inode);
	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);

	it.page = erofs_get_inline_page(inode, it.blkaddr);
	if (IS_ERR(it.page)) {
		ret = PTR_ERR(it.page);
		goto out_unlock;
	}

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = kmap(it.page);
	atomic_map = false;

	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);

	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		xattr_iter_end(&it, atomic_map);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* skip the ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		if (unlikely(it.ofs >= EROFS_BLKSIZ)) {
			/* cannot be unaligned */
			BUG_ON(it.ofs != EROFS_BLKSIZ);
			xattr_iter_end(&it, atomic_map);

			it.page = erofs_get_meta_page(inode->i_sb,
				++it.blkaddr, S_ISDIR(inode->i_mode));
			if (IS_ERR(it.page)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.page);
				goto out_unlock;
			}

			it.kaddr = kmap_atomic(it.page);
			atomic_map = true;
			it.ofs = 0;
		}
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	}
	xattr_iter_end(&it, atomic_map);

	set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
	return ret;
}
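
/*
 * Callbacks driven by xattr_foreach(): 'entry' filters on the entry
 * header, 'name' consumes name slices, 'alloc_buffer' prepares (or
 * skips) the value buffer, and 'value' consumes value slices.
 */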
struct xattr_iter_handlers {
	int (*entry)(struct xattr_iter *, struct erofs_xattr_entry *);
	int (*name)(struct xattr_iter *, unsigned, char *, unsigned);
	int (*alloc_buffer)(struct xattr_iter *, unsigned);
	void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
};
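
/*
 * If 'ofs' has run past the end of the current block, advance to the
 * following metadata block and remap, so callers can keep reading
 * linearly across block boundaries.
 */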
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
	if (it->ofs < EROFS_BLKSIZ)
		return 0;

	xattr_iter_end(it, true);

	it->blkaddr += erofs_blknr(it->ofs);
	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
	if (IS_ERR(it->page)) {
		int err = PTR_ERR(it->page);

		it->page = NULL;
		return err;
	}

	it->kaddr = kmap_atomic(it->page);
	it->ofs = erofs_blkoff(it->ofs);
	return 0;
}
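
/*
 * Position the iterator at the first inline xattr entry of 'inode' and
 * return the number of inline xattr bytes, or -ENOATTR if there are none.
 */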
static int inline_xattr_iter_begin(struct xattr_iter *it,
	struct inode *inode)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
	unsigned xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = inlinexattr_header_size(inode);
	if (unlikely(xattr_header_sz >= vi->xattr_isize)) {
		BUG_ON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);

	it->page = erofs_get_inline_page(inode, it->blkaddr);
	if (IS_ERR(it->page))
		return PTR_ERR(it->page);

	it->kaddr = kmap_atomic(it->page);
	return vi->xattr_isize - xattr_header_sz;
}
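
/*
 * Walk one xattr entry (header, then name, then value) and invoke the
 * given callbacks on each part; 'tlimit', if non-NULL, tracks the bytes
 * remaining in the inline xattr area.
 */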
static int xattr_foreach(struct xattr_iter *it,
	const struct xattr_iter_handlers *op, unsigned int *tlimit)
{
	struct erofs_xattr_entry entry;
	unsigned value_sz, processed, slice;
	int err;

	/* 0. fixup blkaddr, ofs, ipage */
	err = xattr_iter_fixup(it);
	if (err)
		return err;

	/*
	 * 1. read the xattr entry into memory; entries are
	 *    EROFS_XATTR_ALIGN-aligned, so the whole struct
	 *    lies within a single page
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	if (tlimit != NULL) {
		unsigned entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);

		BUG_ON(*tlimit < entry_sz);
		*tlimit -= entry_sz;
	}

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* handle entry */
	err = op->entry(it, &entry);
	if (err) {
		it->ofs += entry.e_name_len + value_sz;
		goto out;
	}

	/* 2. handle xattr name (ofs will finally be at the end of name) */
	processed = 0;

	while (processed < entry.e_name_len) {
		if (it->ofs >= EROFS_BLKSIZ) {
			BUG_ON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned, PAGE_SIZE - it->ofs,
			entry.e_name_len - processed);

		/* handle name */
		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		if (err) {
			it->ofs += entry.e_name_len - processed + value_sz;
			goto out;
		}

		it->ofs += slice;
		processed += slice;
	}

	/* 3. handle xattr value */
	processed = 0;

	if (op->alloc_buffer != NULL) {
		err = op->alloc_buffer(it, value_sz);
		if (err) {
			it->ofs += value_sz;
			goto out;
		}
	}

	while (processed < value_sz) {
		if (it->ofs >= EROFS_BLKSIZ) {
			BUG_ON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned, PAGE_SIZE - it->ofs,
			value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);
		it->ofs += slice;
		processed += slice;
	}

out:
	/* we assume that ofs is 4-byte aligned */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err;
}
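
/*
 * State of a single getxattr lookup: the target (index, name) pair plus
 * the caller-supplied output buffer; the callbacks below match each
 * candidate entry against it.
 */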
struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;
	int buffer_size, index;
	struct qstr name;
};

static int xattr_entrymatch(struct xattr_iter *_it,
	struct erofs_xattr_entry *entry)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return (it->index != entry->e_name_index ||
		it->name.len != entry->e_name_len) ? -ENOATTR : 0;
}

static int xattr_namematch(struct xattr_iter *_it,
	unsigned processed, char *buf, unsigned len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
}

static int xattr_checkbuffer(struct xattr_iter *_it,
	unsigned value_sz)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
	int err = it->buffer_size < value_sz ? -ERANGE : 0;

	it->buffer_size = value_sz;
	return it->buffer == NULL ? 1 : err;
}

static void xattr_copyvalue(struct xattr_iter *_it,
	unsigned processed, char *buf, unsigned len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	memcpy(it->buffer + processed, buf, len);
}

static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};
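
/* Search the inline xattr area of 'inode' for the requested entry. */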
static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	int ret;
	unsigned remaining;

	ret = inline_xattr_iter_begin(&it->it, inode);
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
		if (ret >= 0)
			break;

		if (ret != -ENOATTR)	/* -ENOMEM, -EIO, etc. */
			break;
	}
	xattr_iter_end_final(&it->it);

	return ret < 0 ? ret : it->buffer_size;
}
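
/*
 * Search the shared xattrs referenced by 'inode'; consecutive shared
 * xattrs located in the same metadata block reuse the mapped page.
 */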
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
	unsigned i;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);

		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(inode->i_sb,
				blkaddr, false);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
		if (ret >= 0)
			break;

		if (ret != -ENOATTR)	/* -ENOMEM, -EIO, etc. */
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret < 0 ? ret : it->buffer_size;
}

static bool erofs_xattr_user_list(struct dentry *dentry)
{
	return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
}

static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}
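
/*
 * Core getxattr entry point: resolve (index, name) first in the inline
 * xattr area, then among the shared xattrs.
 */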
int erofs_getxattr(struct inode *inode, int index,
	const char *name,
	void *buffer, size_t buffer_size)
{
	int ret;
	struct getxattr_iter it;

	if (unlikely(name == NULL))
		return -EINVAL;

	ret = init_inode_xattrs(inode);
	if (ret)
		return ret;

	it.index = index;

	it.name.len = strlen(name);
	if (it.name.len > EROFS_NAME_LEN)
		return -ERANGE;
	it.name.name = name;

	it.buffer = buffer;
	it.buffer_size = buffer_size;

	it.it.sb = inode->i_sb;
	ret = inline_getxattr(inode, &it);
	if (ret == -ENOATTR)
		ret = shared_getxattr(inode, &it);
	return ret;
}
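
/*
 * Generic ->get() hook shared by all erofs xattr handlers; it enforces
 * the per-namespace mount options and capabilities before the lookup.
 */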
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
	struct dentry *unused, struct inode *inode,
	const char *name, void *buffer, size_t size)
{
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	switch (handler->flags) {
	case EROFS_XATTR_INDEX_USER:
		if (!test_opt(sbi, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case EROFS_XATTR_INDEX_TRUSTED:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		break;
	case EROFS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}

	return erofs_getxattr(inode, handler->flags, name, buffer, size);
}

const struct xattr_handler erofs_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags = EROFS_XATTR_INDEX_USER,
	.list = erofs_xattr_user_list,
	.get = erofs_xattr_generic_get,
};

const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.flags = EROFS_XATTR_INDEX_TRUSTED,
	.list = erofs_xattr_trusted_list,
	.get = erofs_xattr_generic_get,
};

#ifdef CONFIG_EROFS_FS_SECURITY
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags = EROFS_XATTR_INDEX_SECURITY,
	.get = erofs_xattr_generic_get,
};
#endif

const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};
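
/*
 * State of a listxattr enumeration: names are emitted into 'buffer' at
 * 'buffer_ofs'; when 'buffer' is NULL, only the required size is
 * accumulated in 'buffer_ofs'.
 */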
struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
	char *buffer;
	int buffer_size, buffer_ofs;
};
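
/*
 * listxattr callbacks: emit "prefix<name>\0" for each entry visible to
 * its handler and skip the value bytes entirely.
 */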
static int xattr_entrylist(struct xattr_iter *_it,
	struct erofs_xattr_entry *entry)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);
	unsigned prefix_len;
	const char *prefix;

	const struct xattr_handler *h =
		erofs_xattr_handler(entry->e_name_index);

	if (h == NULL || (h->list != NULL && !h->list(it->dentry)))
		return 1;

	/* Note that at least one of 'prefix' and 'name' should be non-NULL */
	prefix = h->prefix != NULL ? h->prefix : h->name;
	prefix_len = strlen(prefix);

	if (it->buffer == NULL) {
		it->buffer_ofs += prefix_len + entry->e_name_len + 1;
		return 1;
	}

	if (it->buffer_ofs + prefix_len
		+ entry->e_name_len + 1 > it->buffer_size)
		return -ERANGE;

	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
	it->buffer_ofs += prefix_len;
	return 0;
}

static int xattr_namelist(struct xattr_iter *_it,
	unsigned processed, char *buf, unsigned len)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	memcpy(it->buffer + it->buffer_ofs, buf, len);
	it->buffer_ofs += len;
	return 0;
}

static int xattr_skipvalue(struct xattr_iter *_it,
	unsigned value_sz)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	it->buffer[it->buffer_ofs++] = '\0';
	return 1;
}

static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};
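
/* Enumerate the inline xattrs of the inode backing 'dentry'. */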
static int inline_listxattr(struct listxattr_iter *it)
{
	int ret;
	unsigned remaining;

	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
		if (ret < 0)
			break;
	}
	xattr_iter_end_final(&it->it);
	return ret < 0 ? ret : it->buffer_ofs;
}
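
/* Enumerate the shared xattrs referenced by the inode. */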
static int shared_listxattr(struct listxattr_iter *it)
{
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	unsigned i;
	int ret = 0;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(inode->i_sb,
				blkaddr, false);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
		if (ret < 0)
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret < 0 ? ret : it->buffer_ofs;
}
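
/*
 * ->listxattr() entry point: list inline xattrs first, then shared ones;
 * -ENOATTR from initialization simply means the inode has no xattrs.
 */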
ssize_t erofs_listxattr(struct dentry *dentry,
	char *buffer, size_t buffer_size)
{
	int ret;
	struct listxattr_iter it;

	ret = init_inode_xattrs(d_inode(dentry));
	if (ret == -ENOATTR)
		return 0;
	if (ret)
		return ret;

	it.dentry = dentry;
	it.buffer = buffer;
	it.buffer_size = buffer_size;
	it.buffer_ofs = 0;

	it.it.sb = dentry->d_sb;

	ret = inline_listxattr(&it);
	if (ret < 0 && ret != -ENOATTR)
		return ret;
	return shared_listxattr(&it);
}