/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 01 - internal entry
 * 10 - exceptional entry
 * 11 - this bit combination is currently unused/reserved
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that you cannot store NULL in the tree as a value for the index.
 */
#define RADIX_TREE_ENTRY_MASK		3UL
#define RADIX_TREE_INTERNAL_NODE	1UL

/*
 * Most users of the radix tree store pointers but shmem/tmpfs stores swap
 * entries in the same tree.  They are marked as exceptional entries to
 * distinguish them from pointers to struct page.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2

static inline bool radix_tree_is_internal_node(void *ptr)
{
	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
				RADIX_TREE_INTERNAL_NODE;
}
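
/*
 * Illustrative sketch of the low-bit encoding above (not part of this
 * header; entry_kind() is hypothetical):
 *
 *	static const char *entry_kind(void *entry)
 *	{
 *		unsigned long bits;
 *
 *		bits = (unsigned long)entry & RADIX_TREE_ENTRY_MASK;
 *		if (bits == RADIX_TREE_INTERNAL_NODE)
 *			return "internal";	(node pointer, sibling or retry)
 *		if (bits == RADIX_TREE_EXCEPTIONAL_ENTRY)
 *			return "exceptional";	(e.g. a shmem swap entry)
 *		return "data";			(ordinary user pointer)
 *	}
 */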
/*** radix-tree API starts here ***/

#define RADIX_TREE_MAX_TAGS 3

#ifndef RADIX_TREE_MAP_SHIFT
#define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
#endif

#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_TAG_LONGS	\
	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is an exceptional entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @exceptional is the count of every element in ->slots which is
 * either radix_tree_exceptional_entry() or is a sibling entry for an
 * exceptional entry.
 */
struct radix_tree_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	exceptional;	/* Exceptional entry count */
	struct radix_tree_node *parent;	/* Used when ascending tree */
	struct radix_tree_root *root;	/* The tree we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};

/* The IDR tag is stored in the low bits of the GFP flags */
#define ROOT_IS_IDR	((__force gfp_t)4)
/* The top bits of gfp_mask are used to store the root tags */
#define ROOT_TAG_SHIFT	(__GFP_BITS_SHIFT)

struct radix_tree_root {
	spinlock_t		xa_lock;
	gfp_t			gfp_mask;
	struct radix_tree_node	__rcu *rnode;
};

#define RADIX_TREE_INIT(name, mask)	{				\
	.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),			\
	.gfp_mask = (mask),						\
	.rnode = NULL,							\
}

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(name, mask)

#define INIT_RADIX_TREE(root, mask)					\
do {									\
	spin_lock_init(&(root)->xa_lock);				\
	(root)->gfp_mask = (mask);					\
	(root)->rnode = NULL;						\
} while (0)

static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
	return root->rnode == NULL;
}
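
/*
 * Usage sketch (illustrative only; "my_tree" is an assumption).  A tree may
 * be defined and initialized at compile time:
 *
 *	static RADIX_TREE(my_tree, GFP_KERNEL);
 *
 * or initialized at run time:
 *
 *	struct radix_tree_root tree;
 *	INIT_RADIX_TREE(&tree, GFP_KERNEL);
 *
 * radix_tree_empty() is then true until the first insertion.
 */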
/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @node:	node that contains current slot
 * @shift:	shift for the node that holds our slots
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is
 * a subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
	struct radix_tree_node *node;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	unsigned int	shift;
#endif
};

static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	return iter->shift;
#else
	return 0;
#endif
}
/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronization (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes, is entirely the responsibility of API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 8 functions are able to be called locklessly, using RCU.  The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions.  Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items.  So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the radix tree *and* a synchronize_rcu() grace
 * period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */
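
/*
 * Sketch of the lock-free lookup pattern described above (illustrative
 * only; "my_tree", the item type and use_item() are assumptions, not part
 * of this header):
 *
 *	struct item *item;
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item)
 *		use_item(item);		(item itself must be RCU-protected)
 *	rcu_read_unlock();
 */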
/**
 * radix_tree_deref_slot - dereference a slot
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference.  Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot(void __rcu **slot)
{
	return rcu_dereference(*slot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot with tree lock held
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * Similar to radix_tree_deref_slot.  The caller does not hold the RCU read
 * lock but it must hold the tree lock to prevent parallel updates.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry - check radix_tree_deref_slot
 * @arg: pointer returned by radix_tree_deref_slot
 * Returns: 0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely(radix_tree_is_internal_node(arg));
}
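
/*
 * Sketch of the deref/retry pattern (illustrative only; "my_tree" is an
 * assumption).  Under only the RCU read lock an entry can be moved, so a
 * deref that reports an internal entry must be retried:
 *
 *	void __rcu **slot;
 *	void *entry;
 *
 *	rcu_read_lock();
 * repeat:
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	entry = slot ? radix_tree_deref_slot(slot) : NULL;
 *	if (radix_tree_deref_retry(entry))
 *		goto repeat;
 *	rcu_read_unlock();
 */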
/**
 * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/**
 * radix_tree_exception - radix_tree_deref_slot returned either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}
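
/*
 * Sketch of how a lookup result is typically classified (illustrative only;
 * the handle_*() helpers are assumptions).  radix_tree_exception() cheaply
 * tests for either exception kind before the more specific checks:
 *
 *	entry = radix_tree_deref_slot(slot);
 *	if (radix_tree_exception(entry)) {
 *		if (radix_tree_deref_retry(entry))
 *			goto repeat;
 *		if (radix_tree_exceptional_entry(entry))
 *			handle_swap_entry(entry);	(e.g. shmem)
 *	} else {
 *		handle_page(entry);
 *	}
 */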
int __radix_tree_create(struct radix_tree_root *, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void __rcu ***slotp);
int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
			unsigned order, void *);
static inline int radix_tree_insert(struct radix_tree_root *root,
			unsigned long index, void *entry)
{
	return __radix_tree_insert(root, index, 0, entry);
}
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
			  struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
					unsigned long index);
typedef void (*radix_tree_update_node_t)(struct radix_tree_node *);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
			  void __rcu **slot, void *entry,
			  radix_tree_update_node_t update_node);
void radix_tree_iter_replace(struct radix_tree_root *,
		const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
			     void __rcu **slot, void *entry);
void __radix_tree_delete_node(struct radix_tree_root *,
			      struct radix_tree_node *,
			      radix_tree_update_node_t update_node);
void radix_tree_iter_delete(struct radix_tree_root *,
			struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
			   void __rcu **slot);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
			void **results, unsigned long first_index,
			unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
			void __rcu ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void radix_tree_iter_tag_set(struct radix_tree_root *,
		const struct radix_tree_iter *iter, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
		const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
		void **results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
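
/*
 * Tag usage sketch (illustrative only; "my_tree" and MY_TAG are
 * assumptions).  Tags are per-slot bit indices (0 .. RADIX_TREE_MAX_TAGS-1)
 * propagated up the tree, so radix_tree_tagged() answers "is any entry
 * tagged?" straight from the root:
 *
 *	radix_tree_tag_set(&my_tree, index, MY_TAG);
 *	if (radix_tree_tag_get(&my_tree, index, MY_TAG))
 *		...			(this entry carries MY_TAG)
 *	if (radix_tree_tagged(&my_tree, MY_TAG))
 *		...			(some entry carries MY_TAG)
 *	radix_tree_tag_clear(&my_tree, index, MY_TAG);
 */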
static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}
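
/*
 * Sketch of the preload/insert pattern (illustrative only; "my_tree",
 * "my_lock" and "item" are assumptions).  radix_tree_preload() returns 0
 * and leaves preemption disabled on success, having pre-allocated enough
 * nodes that a subsequent insert under a spinlock cannot fail for lack of
 * memory; radix_tree_preload_end() re-enables preemption:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */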
int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
int radix_tree_split(struct radix_tree_root *, unsigned long index,
			unsigned new_order);
int radix_tree_join(struct radix_tree_root *, unsigned long index,
			unsigned new_order, void *);

void __rcu **idr_get_free(struct radix_tree_root *root,
			      struct radix_tree_iter *iter, gfp_t gfp,
			      unsigned long max);

enum {
	RADIX_TREE_ITER_TAG_MASK = 0x0f,	/* tag index in lower nybble */
	RADIX_TREE_ITER_TAGGED   = 0x10,	/* lookup tagged slots */
	RADIX_TREE_ITER_CONTIG   = 0x20,	/* stop at first hole */
};
/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized.  radix_tree_next_chunk() will fill
	 * it in the case of a successful tagged chunk lookup.  If the lookup
	 * was unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * It also fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and a bit mask for tagged iterating (tags).
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
			     struct radix_tree_iter *iter, unsigned flags);
/**
 * radix_tree_iter_lookup - look up an index in the radix tree
 * @root: radix tree root
 * @iter: iterator state
 * @index: key to look up
 *
 * If @index is present in the radix tree, this function returns the slot
 * containing it and updates @iter to describe the entry.  If @index is not
 * present, it returns NULL.
 */
static inline void __rcu **
radix_tree_iter_lookup(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}

/**
 * radix_tree_iter_find - find a present entry
 * @root: radix tree root
 * @iter: iterator state
 * @index: start location
 *
 * This function returns the slot containing the entry with the lowest index
 * which is at least @index.  If @index is larger than any present entry, this
 * function returns NULL.  The @iter is updated to describe the entry found.
 */
static inline void __rcu **
radix_tree_iter_find(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, 0);
}

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}

static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + (slots << iter_shift(iter));
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot:	pointer to current slot
 * @iter:	iterator state
 * Returns:	New slot pointer
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter);
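
/*
 * Sketch of resuming after dropping a lock mid-iteration (illustrative
 * only; "my_tree" and "my_lock" are assumptions):
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		...
 *		if (need_resched()) {
 *			slot = radix_tree_iter_resume(slot, &iter);
 *			spin_unlock(&my_lock);
 *			cond_resched();
 *			spin_lock(&my_lock);
 *		}
 *	}
 */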
/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return (iter->next_index - iter->index) >> iter_shift(iter);
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
void __rcu **__radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags);
#else
/* Can't happen without sibling entries, but the compiler can't tell that */
static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	return slot;
}
#endif

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 *
 * There are several cases where 'slot' can be passed in as NULL to this
 * function.  These cases result from the use of radix_tree_iter_resume() or
 * radix_tree_iter_retry().  In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
 * b) we are doing non-tagged iteration, and iter->index and iter->next_index
 *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
 */
static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			slot++;
			goto found;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset++;
			iter->index = __radix_tree_iter_add(iter, offset);
			slot += offset;
			goto found;
		}
	} else {
		long count = radix_tree_chunk_size(iter);

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (likely(*slot))
				goto found;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;

 found:
	if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
		return __radix_tree_next_slot(slot, iter, flags);
	return slot;
}
/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))
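
/*
 * Iteration sketch (illustrative only; "my_tree" and process() are
 * assumptions).  Under the RCU read lock, deref each slot and retry the
 * chunk on a moved entry:
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *	void *entry;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		entry = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(entry)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		process(entry, iter.index);
 *	}
 *	rcu_read_unlock();
 */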
/**
 * radix_tree_for_each_contig - iterate over contiguous slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_contig(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_CONTIG)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_CONTIG))
/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED | tag))
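
/*
 * Tagged iteration sketch (illustrative only; "my_tree", MY_TAG and
 * process_tagged() are assumptions).  Visits only slots carrying the tag:
 *
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, 0, MY_TAG) {
 *		entry = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(entry)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		process_tagged(entry, iter.index);
 *	}
 */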
#endif /* _LINUX_RADIX_TREE_H */