/* lib/assoc_array.c */
  1. /* Generic associative array implementation.
  2. *
  3. * See Documentation/core-api/assoc_array.rst for information.
  4. *
  5. * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
  6. * Written by David Howells (dhowells@redhat.com)
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public Licence
  10. * as published by the Free Software Foundation; either version
  11. * 2 of the Licence, or (at your option) any later version.
  12. */
  13. //#define DEBUG
  14. #include <linux/rcupdate.h>
  15. #include <linux/slab.h>
  16. #include <linux/err.h>
  17. #include <linux/assoc_array_priv.h>
/*
 * Iterate over an associative array.  The caller must hold the RCU read lock
 * or better.
 *
 * Walk the subtree rooted at @root, presenting every leaf to @iterator along
 * with @iterator_data.  The ascent phase stops when the walk climbs back up
 * to @stop (NULL means "the parent of @root", i.e. walk the whole subtree).
 *
 * Returns 0, or the first non-zero value returned by @iterator (which
 * terminates the iteration immediately).
 */
static int assoc_array_subtree_iterate(const struct assoc_array_ptr *root,
				       const struct assoc_array_ptr *stop,
				       int (*iterator)(const void *leaf,
						       void *iterator_data),
				       void *iterator_data)
{
	const struct assoc_array_shortcut *shortcut;
	const struct assoc_array_node *node;
	const struct assoc_array_ptr *cursor, *ptr, *parent;
	unsigned long has_meta;
	int slot, ret;

	cursor = root;

begin_node:
	if (assoc_array_ptr_is_shortcut(cursor)) {
		/* Descend through a shortcut */
		shortcut = assoc_array_ptr_to_shortcut(cursor);
		cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
	}

	node = assoc_array_ptr_to_node(cursor);
	slot = 0;

	/* We perform two passes of each node.
	 *
	 * The first pass does all the leaves in this node.  This means we
	 * don't miss any leaves if the node is split up by insertion whilst
	 * we're iterating over the branches rooted here (we may, however, see
	 * some leaves twice).
	 */
	has_meta = 0;
	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
		/* OR together all the slot pointers so that, after the pass,
		 * the type bits tell us whether any slot held metadata at all
		 * without having to rescan the node.
		 */
		has_meta |= (unsigned long)ptr;
		if (ptr && assoc_array_ptr_is_leaf(ptr)) {
			/* We need a barrier between the read of the pointer,
			 * which is supplied by the above READ_ONCE().
			 */
			/* Invoke the callback */
			ret = iterator(assoc_array_ptr_to_leaf(ptr),
				       iterator_data);
			if (ret)
				return ret;
		}
	}

	/* The second pass attends to all the metadata pointers.  If we follow
	 * one of these we may find that we don't come back here, but rather go
	 * back to a replacement node with the leaves in a different layout.
	 *
	 * We are guaranteed to make progress, however, as the slot number for
	 * a particular portion of the key space cannot change - and we
	 * continue at the back pointer + 1.
	 */
	if (!(has_meta & ASSOC_ARRAY_PTR_META_TYPE))
		goto finished_node;
	slot = 0;

continue_node:
	node = assoc_array_ptr_to_node(cursor);
	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
		if (assoc_array_ptr_is_meta(ptr)) {
			/* Descend into the metadata subtree; we come back via
			 * the back pointer when it is exhausted.
			 */
			cursor = ptr;
			goto begin_node;
		}
	}

finished_node:
	/* Move up to the parent (may need to skip back over a shortcut) */
	parent = READ_ONCE(node->back_pointer); /* Address dependency. */
	slot = node->parent_slot;
	if (parent == stop)
		return 0;

	if (assoc_array_ptr_is_shortcut(parent)) {
		shortcut = assoc_array_ptr_to_shortcut(parent);
		cursor = parent;
		parent = READ_ONCE(shortcut->back_pointer); /* Address dependency. */
		slot = shortcut->parent_slot;
		if (parent == stop)
			return 0;
	}

	/* Ascend to next slot in parent node */
	cursor = parent;
	slot++;
	goto continue_node;
}
  103. /**
  104. * assoc_array_iterate - Pass all objects in the array to a callback
  105. * @array: The array to iterate over.
  106. * @iterator: The callback function.
  107. * @iterator_data: Private data for the callback function.
  108. *
  109. * Iterate over all the objects in an associative array. Each one will be
  110. * presented to the iterator function.
  111. *
  112. * If the array is being modified concurrently with the iteration then it is
  113. * possible that some objects in the array will be passed to the iterator
  114. * callback more than once - though every object should be passed at least
  115. * once. If this is undesirable then the caller must lock against modification
  116. * for the duration of this function.
  117. *
  118. * The function will return 0 if no objects were in the array or else it will
  119. * return the result of the last iterator function called. Iteration stops
  120. * immediately if any call to the iteration function results in a non-zero
  121. * return.
  122. *
  123. * The caller should hold the RCU read lock or better if concurrent
  124. * modification is possible.
  125. */
  126. int assoc_array_iterate(const struct assoc_array *array,
  127. int (*iterator)(const void *object,
  128. void *iterator_data),
  129. void *iterator_data)
  130. {
  131. struct assoc_array_ptr *root = READ_ONCE(array->root); /* Address dependency. */
  132. if (!root)
  133. return 0;
  134. return assoc_array_subtree_iterate(root, NULL, iterator, iterator_data);
  135. }
/* Outcome of walking the internal tree towards an index key. */
enum assoc_array_walk_status {
	assoc_array_walk_tree_empty,		/* The array has no root pointer */
	assoc_array_walk_found_terminal_node,	/* Reached a node with no onward meta pointer in the key's slot */
	assoc_array_walk_found_wrong_shortcut,	/* Hit a shortcut whose partial index key differs from ours */
};
/*
 * Where assoc_array_walk() ended up.  Which member is valid depends on the
 * enum assoc_array_walk_status it returned.
 */
struct assoc_array_walk_result {
	struct {
		struct assoc_array_node *node;	/* Node in which leaf might be found */
		int level;			/* Key bit-level at which @node was reached */
		int slot;			/* Slot in @node selected by the index key */
	} terminal_node;
	struct {
		struct assoc_array_shortcut *shortcut;	/* The shortcut that disagreed with the key */
		int level;			/* Level at which the shortcut was entered */
		int sc_level;			/* Level within the shortcut where the mismatch was found */
		unsigned long sc_segments;	/* Shortcut's index-key chunk at @sc_level */
		unsigned long dissimilarity;	/* XOR of the searched key chunk with @sc_segments */
	} wrong_shortcut;
};
/*
 * Navigate through the internal tree looking for the closest node to the key.
 *
 * Descends from the root, selecting a slot per level from the relevant bits
 * of @index_key (chunks fetched via @ops->get_key_chunk) and following node
 * and shortcut pointers.  Fills in *@result and returns a status saying
 * whether the tree was empty, a terminal node was reached, or a shortcut
 * with a mismatching partial key was encountered.
 */
static enum assoc_array_walk_status
assoc_array_walk(const struct assoc_array *array,
		 const struct assoc_array_ops *ops,
		 const void *index_key,
		 struct assoc_array_walk_result *result)
{
	struct assoc_array_shortcut *shortcut;
	struct assoc_array_node *node;
	struct assoc_array_ptr *cursor, *ptr;
	unsigned long sc_segments, dissimilarity;
	unsigned long segments;
	int level, sc_level, next_sc_level;
	int slot;

	pr_devel("-->%s()\n", __func__);

	cursor = READ_ONCE(array->root);  /* Address dependency. */
	if (!cursor)
		return assoc_array_walk_tree_empty;

	level = 0;

	/* Use segments from the key for the new leaf to navigate through the
	 * internal tree, skipping through nodes and shortcuts that are on
	 * route to the destination.  Eventually we'll come to a slot that is
	 * either empty or contains a leaf at which point we've found a node in
	 * which the leaf we're looking for might be found or into which it
	 * should be inserted.
	 */
jumped:
	/* (Re)fetch the key chunk covering the current level. */
	segments = ops->get_key_chunk(index_key, level);
	pr_devel("segments[%d]: %lx\n", level, segments);

	if (assoc_array_ptr_is_shortcut(cursor))
		goto follow_shortcut;

consider_node:
	node = assoc_array_ptr_to_node(cursor);
	/* Extract this level's fan-out bits from the current key chunk. */
	slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
	slot &= ASSOC_ARRAY_FAN_MASK;
	ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */

	pr_devel("consider slot %x [ix=%d type=%lu]\n",
		 slot, level, (unsigned long)ptr & 3);

	if (!assoc_array_ptr_is_meta(ptr)) {
		/* The node doesn't have a node/shortcut pointer in the slot
		 * corresponding to the index key that we have to follow.
		 */
		result->terminal_node.node = node;
		result->terminal_node.level = level;
		result->terminal_node.slot = slot;
		pr_devel("<--%s() = terminal_node\n", __func__);
		return assoc_array_walk_found_terminal_node;
	}

	if (assoc_array_ptr_is_node(ptr)) {
		/* There is a pointer to a node in the slot corresponding to
		 * this index key segment, so we need to follow it.
		 */
		cursor = ptr;
		level += ASSOC_ARRAY_LEVEL_STEP;
		/* Only refetch the key chunk when stepping over a chunk
		 * boundary; otherwise the current chunk still covers us.
		 */
		if ((level & ASSOC_ARRAY_KEY_CHUNK_MASK) != 0)
			goto consider_node;
		goto jumped;
	}

	/* There is a shortcut in the slot corresponding to the index key
	 * segment.  We follow the shortcut if its partial index key matches
	 * this leaf's.  Otherwise we need to split the shortcut.
	 */
	cursor = ptr;

follow_shortcut:
	shortcut = assoc_array_ptr_to_shortcut(cursor);
	pr_devel("shortcut to %d\n", shortcut->skip_to_level);
	sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
	BUG_ON(sc_level > shortcut->skip_to_level);

	do {
		/* Check the leaf against the shortcut's index key a word at a
		 * time, trimming the final word (the shortcut stores the index
		 * key completely from the root to the shortcut's target).
		 */
		if ((sc_level & ASSOC_ARRAY_KEY_CHUNK_MASK) == 0)
			segments = ops->get_key_chunk(index_key, sc_level);

		sc_segments = shortcut->index_key[sc_level >> ASSOC_ARRAY_KEY_CHUNK_SHIFT];
		dissimilarity = segments ^ sc_segments;

		if (round_up(sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE) > shortcut->skip_to_level) {
			/* Trim segments that are beyond the shortcut */
			int shift = shortcut->skip_to_level & ASSOC_ARRAY_KEY_CHUNK_MASK;
			dissimilarity &= ~(ULONG_MAX << shift);
			next_sc_level = shortcut->skip_to_level;
		} else {
			next_sc_level = sc_level + ASSOC_ARRAY_KEY_CHUNK_SIZE;
			next_sc_level = round_down(next_sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
		}

		if (dissimilarity != 0) {
			/* This shortcut points elsewhere */
			result->wrong_shortcut.shortcut = shortcut;
			result->wrong_shortcut.level = level;
			result->wrong_shortcut.sc_level = sc_level;
			result->wrong_shortcut.sc_segments = sc_segments;
			result->wrong_shortcut.dissimilarity = dissimilarity;
			return assoc_array_walk_found_wrong_shortcut;
		}

		sc_level = next_sc_level;
	} while (sc_level < shortcut->skip_to_level);

	/* The shortcut matches the leaf's index to this point. */
	cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
	/* If the shortcut skipped across a key-chunk boundary we must fetch a
	 * fresh chunk before considering the next node.
	 */
	if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
		level = sc_level;
		goto jumped;
	} else {
		level = sc_level;
		goto consider_node;
	}
}
  264. /**
  265. * assoc_array_find - Find an object by index key
  266. * @array: The associative array to search.
  267. * @ops: The operations to use.
  268. * @index_key: The key to the object.
  269. *
  270. * Find an object in an associative array by walking through the internal tree
  271. * to the node that should contain the object and then searching the leaves
  272. * there. NULL is returned if the requested object was not found in the array.
  273. *
  274. * The caller must hold the RCU read lock or better.
  275. */
  276. void *assoc_array_find(const struct assoc_array *array,
  277. const struct assoc_array_ops *ops,
  278. const void *index_key)
  279. {
  280. struct assoc_array_walk_result result;
  281. const struct assoc_array_node *node;
  282. const struct assoc_array_ptr *ptr;
  283. const void *leaf;
  284. int slot;
  285. if (assoc_array_walk(array, ops, index_key, &result) !=
  286. assoc_array_walk_found_terminal_node)
  287. return NULL;
  288. node = result.terminal_node.node;
  289. /* If the target key is available to us, it's has to be pointed to by
  290. * the terminal node.
  291. */
  292. for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  293. ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
  294. if (ptr && assoc_array_ptr_is_leaf(ptr)) {
  295. /* We need a barrier between the read of the pointer
  296. * and dereferencing the pointer - but only if we are
  297. * actually going to dereference it.
  298. */
  299. leaf = assoc_array_ptr_to_leaf(ptr);
  300. if (ops->compare_object(leaf, index_key))
  301. return (void *)leaf;
  302. }
  303. }
  304. return NULL;
  305. }
/*
 * Destructively iterate over an associative array.  The caller must prevent
 * other simultaneous accesses.
 *
 * Frees every node and shortcut under @root; if @ops is given, each leaf
 * object is additionally passed to @ops->free_object().
 */
static void assoc_array_destroy_subtree(struct assoc_array_ptr *root,
					const struct assoc_array_ops *ops)
{
	struct assoc_array_shortcut *shortcut;
	struct assoc_array_node *node;
	struct assoc_array_ptr *cursor, *parent = NULL;
	int slot = -1;

	pr_devel("-->%s()\n", __func__);

	cursor = root;
	if (!cursor) {
		pr_devel("empty\n");
		return;
	}

move_to_meta:
	if (assoc_array_ptr_is_shortcut(cursor)) {
		/* Descend through a shortcut */
		pr_devel("[%d] shortcut\n", slot);
		BUG_ON(!assoc_array_ptr_is_shortcut(cursor));
		shortcut = assoc_array_ptr_to_shortcut(cursor);
		/* Sanity-check the back links before we trust them for the
		 * climb back up.
		 */
		BUG_ON(shortcut->back_pointer != parent);
		BUG_ON(slot != -1 && shortcut->parent_slot != slot);
		parent = cursor;
		cursor = shortcut->next_node;
		slot = -1;
		/* A shortcut must lead to a node, never to a leaf. */
		BUG_ON(!assoc_array_ptr_is_node(cursor));
	}

	pr_devel("[%d] node\n", slot);
	node = assoc_array_ptr_to_node(cursor);
	BUG_ON(node->back_pointer != parent);
	BUG_ON(slot != -1 && node->parent_slot != slot);
	slot = 0;

continue_node:
	pr_devel("Node %p [back=%p]\n", node, node->back_pointer);
	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		struct assoc_array_ptr *ptr = node->slots[slot];

		if (!ptr)
			continue;
		if (assoc_array_ptr_is_meta(ptr)) {
			/* Descend into the subtree first; this node is freed
			 * on the way back up, after its children.
			 */
			parent = cursor;
			cursor = ptr;
			goto move_to_meta;
		}
		if (ops) {
			pr_devel("[%d] free leaf\n", slot);
			ops->free_object(assoc_array_ptr_to_leaf(ptr));
		}
	}

	/* All slots dealt with; free this node and move back up. */
	parent = node->back_pointer;
	slot = node->parent_slot;
	pr_devel("free node\n");
	kfree(node);
	if (!parent)
		return; /* Done */

	/* Move back up to the parent (may need to free a shortcut on
	 * the way up) */
	if (assoc_array_ptr_is_shortcut(parent)) {
		shortcut = assoc_array_ptr_to_shortcut(parent);
		BUG_ON(shortcut->next_node != cursor);
		cursor = parent;
		parent = shortcut->back_pointer;
		slot = shortcut->parent_slot;
		pr_devel("free shortcut\n");
		kfree(shortcut);
		if (!parent)
			return;

		BUG_ON(!assoc_array_ptr_is_node(parent));
	}

	/* Ascend to next slot in parent node */
	pr_devel("ascend to %p[%d]\n", parent, slot);
	cursor = parent;
	node = assoc_array_ptr_to_node(cursor);
	slot++;
	goto continue_node;
}
  384. /**
  385. * assoc_array_destroy - Destroy an associative array
  386. * @array: The array to destroy.
  387. * @ops: The operations to use.
  388. *
  389. * Discard all metadata and free all objects in an associative array. The
  390. * array will be empty and ready to use again upon completion. This function
  391. * cannot fail.
  392. *
  393. * The caller must prevent all other accesses whilst this takes place as no
  394. * attempt is made to adjust pointers gracefully to permit RCU readlock-holding
  395. * accesses to continue. On the other hand, no memory allocation is required.
  396. */
  397. void assoc_array_destroy(struct assoc_array *array,
  398. const struct assoc_array_ops *ops)
  399. {
  400. assoc_array_destroy_subtree(array->root, ops);
  401. array->root = NULL;
  402. }
  403. /*
  404. * Handle insertion into an empty tree.
  405. */
  406. static bool assoc_array_insert_in_empty_tree(struct assoc_array_edit *edit)
  407. {
  408. struct assoc_array_node *new_n0;
  409. pr_devel("-->%s()\n", __func__);
  410. new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  411. if (!new_n0)
  412. return false;
  413. edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
  414. edit->leaf_p = &new_n0->slots[0];
  415. edit->adjust_count_on = new_n0;
  416. edit->set[0].ptr = &edit->array->root;
  417. edit->set[0].to = assoc_array_node_to_ptr(new_n0);
  418. pr_devel("<--%s() = ok [no root]\n", __func__);
  419. return true;
  420. }
  421. /*
  422. * Handle insertion into a terminal node.
  423. */
  424. static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
  425. const struct assoc_array_ops *ops,
  426. const void *index_key,
  427. struct assoc_array_walk_result *result)
  428. {
  429. struct assoc_array_shortcut *shortcut, *new_s0;
  430. struct assoc_array_node *node, *new_n0, *new_n1, *side;
  431. struct assoc_array_ptr *ptr;
  432. unsigned long dissimilarity, base_seg, blank;
  433. size_t keylen;
  434. bool have_meta;
  435. int level, diff;
  436. int slot, next_slot, free_slot, i, j;
  437. node = result->terminal_node.node;
  438. level = result->terminal_node.level;
  439. edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
  440. pr_devel("-->%s()\n", __func__);
  441. /* We arrived at a node which doesn't have an onward node or shortcut
  442. * pointer that we have to follow. This means that (a) the leaf we
  443. * want must go here (either by insertion or replacement) or (b) we
  444. * need to split this node and insert in one of the fragments.
  445. */
  446. free_slot = -1;
  447. /* Firstly, we have to check the leaves in this node to see if there's
  448. * a matching one we should replace in place.
  449. */
  450. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  451. ptr = node->slots[i];
  452. if (!ptr) {
  453. free_slot = i;
  454. continue;
  455. }
  456. if (assoc_array_ptr_is_leaf(ptr) &&
  457. ops->compare_object(assoc_array_ptr_to_leaf(ptr),
  458. index_key)) {
  459. pr_devel("replace in slot %d\n", i);
  460. edit->leaf_p = &node->slots[i];
  461. edit->dead_leaf = node->slots[i];
  462. pr_devel("<--%s() = ok [replace]\n", __func__);
  463. return true;
  464. }
  465. }
  466. /* If there is a free slot in this node then we can just insert the
  467. * leaf here.
  468. */
  469. if (free_slot >= 0) {
  470. pr_devel("insert in free slot %d\n", free_slot);
  471. edit->leaf_p = &node->slots[free_slot];
  472. edit->adjust_count_on = node;
  473. pr_devel("<--%s() = ok [insert]\n", __func__);
  474. return true;
  475. }
  476. /* The node has no spare slots - so we're either going to have to split
  477. * it or insert another node before it.
  478. *
  479. * Whatever, we're going to need at least two new nodes - so allocate
  480. * those now. We may also need a new shortcut, but we deal with that
  481. * when we need it.
  482. */
  483. new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  484. if (!new_n0)
  485. return false;
  486. edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
  487. new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  488. if (!new_n1)
  489. return false;
  490. edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
  491. /* We need to find out how similar the leaves are. */
  492. pr_devel("no spare slots\n");
  493. have_meta = false;
  494. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  495. ptr = node->slots[i];
  496. if (assoc_array_ptr_is_meta(ptr)) {
  497. edit->segment_cache[i] = 0xff;
  498. have_meta = true;
  499. continue;
  500. }
  501. base_seg = ops->get_object_key_chunk(
  502. assoc_array_ptr_to_leaf(ptr), level);
  503. base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
  504. edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
  505. }
  506. if (have_meta) {
  507. pr_devel("have meta\n");
  508. goto split_node;
  509. }
  510. /* The node contains only leaves */
  511. dissimilarity = 0;
  512. base_seg = edit->segment_cache[0];
  513. for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
  514. dissimilarity |= edit->segment_cache[i] ^ base_seg;
  515. pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
  516. if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
  517. /* The old leaves all cluster in the same slot. We will need
  518. * to insert a shortcut if the new node wants to cluster with them.
  519. */
  520. if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
  521. goto all_leaves_cluster_together;
  522. /* Otherwise all the old leaves cluster in the same slot, but
  523. * the new leaf wants to go into a different slot - so we
  524. * create a new node (n0) to hold the new leaf and a pointer to
  525. * a new node (n1) holding all the old leaves.
  526. *
  527. * This can be done by falling through to the node splitting
  528. * path.
  529. */
  530. pr_devel("present leaves cluster but not new leaf\n");
  531. }
  532. split_node:
  533. pr_devel("split node\n");
  534. /* We need to split the current node. The node must contain anything
  535. * from a single leaf (in the one leaf case, this leaf will cluster
  536. * with the new leaf) and the rest meta-pointers, to all leaves, some
  537. * of which may cluster.
  538. *
  539. * It won't contain the case in which all the current leaves plus the
  540. * new leaves want to cluster in the same slot.
  541. *
  542. * We need to expel at least two leaves out of a set consisting of the
  543. * leaves in the node and the new leaf. The current meta pointers can
  544. * just be copied as they shouldn't cluster with any of the leaves.
  545. *
  546. * We need a new node (n0) to replace the current one and a new node to
  547. * take the expelled nodes (n1).
  548. */
  549. edit->set[0].to = assoc_array_node_to_ptr(new_n0);
  550. new_n0->back_pointer = node->back_pointer;
  551. new_n0->parent_slot = node->parent_slot;
  552. new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
  553. new_n1->parent_slot = -1; /* Need to calculate this */
  554. do_split_node:
  555. pr_devel("do_split_node\n");
  556. new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
  557. new_n1->nr_leaves_on_branch = 0;
  558. /* Begin by finding two matching leaves. There have to be at least two
  559. * that match - even if there are meta pointers - because any leaf that
  560. * would match a slot with a meta pointer in it must be somewhere
  561. * behind that meta pointer and cannot be here. Further, given N
  562. * remaining leaf slots, we now have N+1 leaves to go in them.
  563. */
  564. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  565. slot = edit->segment_cache[i];
  566. if (slot != 0xff)
  567. for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
  568. if (edit->segment_cache[j] == slot)
  569. goto found_slot_for_multiple_occupancy;
  570. }
  571. found_slot_for_multiple_occupancy:
  572. pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
  573. BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
  574. BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
  575. BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
  576. new_n1->parent_slot = slot;
  577. /* Metadata pointers cannot change slot */
  578. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
  579. if (assoc_array_ptr_is_meta(node->slots[i]))
  580. new_n0->slots[i] = node->slots[i];
  581. else
  582. new_n0->slots[i] = NULL;
  583. BUG_ON(new_n0->slots[slot] != NULL);
  584. new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
  585. /* Filter the leaf pointers between the new nodes */
  586. free_slot = -1;
  587. next_slot = 0;
  588. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  589. if (assoc_array_ptr_is_meta(node->slots[i]))
  590. continue;
  591. if (edit->segment_cache[i] == slot) {
  592. new_n1->slots[next_slot++] = node->slots[i];
  593. new_n1->nr_leaves_on_branch++;
  594. } else {
  595. do {
  596. free_slot++;
  597. } while (new_n0->slots[free_slot] != NULL);
  598. new_n0->slots[free_slot] = node->slots[i];
  599. }
  600. }
  601. pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
  602. if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
  603. do {
  604. free_slot++;
  605. } while (new_n0->slots[free_slot] != NULL);
  606. edit->leaf_p = &new_n0->slots[free_slot];
  607. edit->adjust_count_on = new_n0;
  608. } else {
  609. edit->leaf_p = &new_n1->slots[next_slot++];
  610. edit->adjust_count_on = new_n1;
  611. }
  612. BUG_ON(next_slot <= 1);
  613. edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
  614. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  615. if (edit->segment_cache[i] == 0xff) {
  616. ptr = node->slots[i];
  617. BUG_ON(assoc_array_ptr_is_leaf(ptr));
  618. if (assoc_array_ptr_is_node(ptr)) {
  619. side = assoc_array_ptr_to_node(ptr);
  620. edit->set_backpointers[i] = &side->back_pointer;
  621. } else {
  622. shortcut = assoc_array_ptr_to_shortcut(ptr);
  623. edit->set_backpointers[i] = &shortcut->back_pointer;
  624. }
  625. }
  626. }
  627. ptr = node->back_pointer;
  628. if (!ptr)
  629. edit->set[0].ptr = &edit->array->root;
  630. else if (assoc_array_ptr_is_node(ptr))
  631. edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
  632. else
  633. edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
  634. edit->excised_meta[0] = assoc_array_node_to_ptr(node);
  635. pr_devel("<--%s() = ok [split node]\n", __func__);
  636. return true;
  637. all_leaves_cluster_together:
  638. /* All the leaves, new and old, want to cluster together in this node
  639. * in the same slot, so we have to replace this node with a shortcut to
  640. * skip over the identical parts of the key and then place a pair of
  641. * nodes, one inside the other, at the end of the shortcut and
  642. * distribute the keys between them.
  643. *
  644. * Firstly we need to work out where the leaves start diverging as a
  645. * bit position into their keys so that we know how big the shortcut
  646. * needs to be.
  647. *
  648. * We only need to make a single pass of N of the N+1 leaves because if
  649. * any keys differ between themselves at bit X then at least one of
  650. * them must also differ with the base key at bit X or before.
  651. */
  652. pr_devel("all leaves cluster together\n");
  653. diff = INT_MAX;
  654. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  655. int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
  656. index_key);
  657. if (x < diff) {
  658. BUG_ON(x < 0);
  659. diff = x;
  660. }
  661. }
  662. BUG_ON(diff == INT_MAX);
  663. BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
  664. keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  665. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  666. new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
  667. keylen * sizeof(unsigned long), GFP_KERNEL);
  668. if (!new_s0)
  669. return false;
  670. edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
  671. edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
  672. new_s0->back_pointer = node->back_pointer;
  673. new_s0->parent_slot = node->parent_slot;
  674. new_s0->next_node = assoc_array_node_to_ptr(new_n0);
  675. new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
  676. new_n0->parent_slot = 0;
  677. new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
  678. new_n1->parent_slot = -1; /* Need to calculate this */
  679. new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
  680. pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
  681. BUG_ON(level <= 0);
  682. for (i = 0; i < keylen; i++)
  683. new_s0->index_key[i] =
  684. ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
  685. if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
  686. blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
  687. pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
  688. new_s0->index_key[keylen - 1] &= ~blank;
  689. }
  690. /* This now reduces to a node splitting exercise for which we'll need
  691. * to regenerate the disparity table.
  692. */
  693. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  694. ptr = node->slots[i];
  695. base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
  696. level);
  697. base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
  698. edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
  699. }
  700. base_seg = ops->get_key_chunk(index_key, level);
  701. base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
  702. edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
  703. goto do_split_node;
  704. }
  705. /*
  706. * Handle insertion into the middle of a shortcut.
  707. */
  708. static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
  709. const struct assoc_array_ops *ops,
  710. struct assoc_array_walk_result *result)
  711. {
  712. struct assoc_array_shortcut *shortcut, *new_s0, *new_s1;
  713. struct assoc_array_node *node, *new_n0, *side;
  714. unsigned long sc_segments, dissimilarity, blank;
  715. size_t keylen;
  716. int level, sc_level, diff;
  717. int sc_slot;
  718. shortcut = result->wrong_shortcut.shortcut;
  719. level = result->wrong_shortcut.level;
  720. sc_level = result->wrong_shortcut.sc_level;
  721. sc_segments = result->wrong_shortcut.sc_segments;
  722. dissimilarity = result->wrong_shortcut.dissimilarity;
  723. pr_devel("-->%s(ix=%d dis=%lx scix=%d)\n",
  724. __func__, level, dissimilarity, sc_level);
  725. /* We need to split a shortcut and insert a node between the two
  726. * pieces. Zero-length pieces will be dispensed with entirely.
  727. *
  728. * First of all, we need to find out in which level the first
  729. * difference was.
  730. */
  731. diff = __ffs(dissimilarity);
  732. diff &= ~ASSOC_ARRAY_LEVEL_STEP_MASK;
  733. diff += sc_level & ~ASSOC_ARRAY_KEY_CHUNK_MASK;
  734. pr_devel("diff=%d\n", diff);
  735. if (!shortcut->back_pointer) {
  736. edit->set[0].ptr = &edit->array->root;
  737. } else if (assoc_array_ptr_is_node(shortcut->back_pointer)) {
  738. node = assoc_array_ptr_to_node(shortcut->back_pointer);
  739. edit->set[0].ptr = &node->slots[shortcut->parent_slot];
  740. } else {
  741. BUG();
  742. }
  743. edit->excised_meta[0] = assoc_array_shortcut_to_ptr(shortcut);
  744. /* Create a new node now since we're going to need it anyway */
  745. new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  746. if (!new_n0)
  747. return false;
  748. edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
  749. edit->adjust_count_on = new_n0;
  750. /* Insert a new shortcut before the new node if this segment isn't of
  751. * zero length - otherwise we just connect the new node directly to the
  752. * parent.
  753. */
  754. level += ASSOC_ARRAY_LEVEL_STEP;
  755. if (diff > level) {
  756. pr_devel("pre-shortcut %d...%d\n", level, diff);
  757. keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  758. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  759. new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
  760. keylen * sizeof(unsigned long), GFP_KERNEL);
  761. if (!new_s0)
  762. return false;
  763. edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
  764. edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
  765. new_s0->back_pointer = shortcut->back_pointer;
  766. new_s0->parent_slot = shortcut->parent_slot;
  767. new_s0->next_node = assoc_array_node_to_ptr(new_n0);
  768. new_s0->skip_to_level = diff;
  769. new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
  770. new_n0->parent_slot = 0;
  771. memcpy(new_s0->index_key, shortcut->index_key,
  772. keylen * sizeof(unsigned long));
  773. blank = ULONG_MAX << (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
  774. pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, diff, blank);
  775. new_s0->index_key[keylen - 1] &= ~blank;
  776. } else {
  777. pr_devel("no pre-shortcut\n");
  778. edit->set[0].to = assoc_array_node_to_ptr(new_n0);
  779. new_n0->back_pointer = shortcut->back_pointer;
  780. new_n0->parent_slot = shortcut->parent_slot;
  781. }
  782. side = assoc_array_ptr_to_node(shortcut->next_node);
  783. new_n0->nr_leaves_on_branch = side->nr_leaves_on_branch;
  784. /* We need to know which slot in the new node is going to take a
  785. * metadata pointer.
  786. */
  787. sc_slot = sc_segments >> (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
  788. sc_slot &= ASSOC_ARRAY_FAN_MASK;
  789. pr_devel("new slot %lx >> %d -> %d\n",
  790. sc_segments, diff & ASSOC_ARRAY_KEY_CHUNK_MASK, sc_slot);
  791. /* Determine whether we need to follow the new node with a replacement
  792. * for the current shortcut. We could in theory reuse the current
  793. * shortcut if its parent slot number doesn't change - but that's a
  794. * 1-in-16 chance so not worth expending the code upon.
  795. */
  796. level = diff + ASSOC_ARRAY_LEVEL_STEP;
  797. if (level < shortcut->skip_to_level) {
  798. pr_devel("post-shortcut %d...%d\n", level, shortcut->skip_to_level);
  799. keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  800. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  801. new_s1 = kzalloc(sizeof(struct assoc_array_shortcut) +
  802. keylen * sizeof(unsigned long), GFP_KERNEL);
  803. if (!new_s1)
  804. return false;
  805. edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
  806. new_s1->back_pointer = assoc_array_node_to_ptr(new_n0);
  807. new_s1->parent_slot = sc_slot;
  808. new_s1->next_node = shortcut->next_node;
  809. new_s1->skip_to_level = shortcut->skip_to_level;
  810. new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1);
  811. memcpy(new_s1->index_key, shortcut->index_key,
  812. keylen * sizeof(unsigned long));
  813. edit->set[1].ptr = &side->back_pointer;
  814. edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1);
  815. } else {
  816. pr_devel("no post-shortcut\n");
  817. /* We don't have to replace the pointed-to node as long as we
  818. * use memory barriers to make sure the parent slot number is
  819. * changed before the back pointer (the parent slot number is
  820. * irrelevant to the old parent shortcut).
  821. */
  822. new_n0->slots[sc_slot] = shortcut->next_node;
  823. edit->set_parent_slot[0].p = &side->parent_slot;
  824. edit->set_parent_slot[0].to = sc_slot;
  825. edit->set[1].ptr = &side->back_pointer;
  826. edit->set[1].to = assoc_array_node_to_ptr(new_n0);
  827. }
  828. /* Install the new leaf in a spare slot in the new node. */
  829. if (sc_slot == 0)
  830. edit->leaf_p = &new_n0->slots[1];
  831. else
  832. edit->leaf_p = &new_n0->slots[0];
  833. pr_devel("<--%s() = ok [split shortcut]\n", __func__);
  834. return edit;
  835. }
  836. /**
  837. * assoc_array_insert - Script insertion of an object into an associative array
  838. * @array: The array to insert into.
  839. * @ops: The operations to use.
  840. * @index_key: The key to insert at.
  841. * @object: The object to insert.
  842. *
  843. * Precalculate and preallocate a script for the insertion or replacement of an
  844. * object in an associative array. This results in an edit script that can
  845. * either be applied or cancelled.
  846. *
  847. * The function returns a pointer to an edit script or -ENOMEM.
  848. *
  849. * The caller should lock against other modifications and must continue to hold
  850. * the lock until assoc_array_apply_edit() has been called.
  851. *
  852. * Accesses to the tree may take place concurrently with this function,
  853. * provided they hold the RCU read lock.
  854. */
  855. struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
  856. const struct assoc_array_ops *ops,
  857. const void *index_key,
  858. void *object)
  859. {
  860. struct assoc_array_walk_result result;
  861. struct assoc_array_edit *edit;
  862. pr_devel("-->%s()\n", __func__);
  863. /* The leaf pointer we're given must not have the bottom bit set as we
  864. * use those for type-marking the pointer. NULL pointers are also not
  865. * allowed as they indicate an empty slot but we have to allow them
  866. * here as they can be updated later.
  867. */
  868. BUG_ON(assoc_array_ptr_is_meta(object));
  869. edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
  870. if (!edit)
  871. return ERR_PTR(-ENOMEM);
  872. edit->array = array;
  873. edit->ops = ops;
  874. edit->leaf = assoc_array_leaf_to_ptr(object);
  875. edit->adjust_count_by = 1;
  876. switch (assoc_array_walk(array, ops, index_key, &result)) {
  877. case assoc_array_walk_tree_empty:
  878. /* Allocate a root node if there isn't one yet */
  879. if (!assoc_array_insert_in_empty_tree(edit))
  880. goto enomem;
  881. return edit;
  882. case assoc_array_walk_found_terminal_node:
  883. /* We found a node that doesn't have a node/shortcut pointer in
  884. * the slot corresponding to the index key that we have to
  885. * follow.
  886. */
  887. if (!assoc_array_insert_into_terminal_node(edit, ops, index_key,
  888. &result))
  889. goto enomem;
  890. return edit;
  891. case assoc_array_walk_found_wrong_shortcut:
  892. /* We found a shortcut that didn't match our key in a slot we
  893. * needed to follow.
  894. */
  895. if (!assoc_array_insert_mid_shortcut(edit, ops, &result))
  896. goto enomem;
  897. return edit;
  898. }
  899. enomem:
  900. /* Clean up after an out of memory error */
  901. pr_devel("enomem\n");
  902. assoc_array_cancel_edit(edit);
  903. return ERR_PTR(-ENOMEM);
  904. }
/**
 * assoc_array_insert_set_object - Set the new object pointer in an edit script
 * @edit: The edit script to modify.
 * @object: The object pointer to set.
 *
 * Change the object to be inserted in an edit script.  The object pointed to
 * by the old object is not freed.  This must be done prior to applying the
 * script.
 */
void assoc_array_insert_set_object(struct assoc_array_edit *edit, void *object)
{
	/* NULL is not a valid leaf - an empty slot reads as NULL. */
	BUG_ON(!object);
	/* Type-mark the pointer as a leaf; it gets planted when the script is
	 * applied.
	 */
	edit->leaf = assoc_array_leaf_to_ptr(object);
}
/* State carried through the subtree iteration that folds a branch's leaves
 * into a single replacement node during deletion.
 */
struct assoc_array_delete_collapse_context {
	struct assoc_array_node *node;	/* Replacement node being populated */
	const void *skip_leaf;		/* Leaf being deleted - not copied over */
	int slot;			/* Next slot in @node to fill */
};
  924. /*
  925. * Subtree collapse to node iterator.
  926. */
  927. static int assoc_array_delete_collapse_iterator(const void *leaf,
  928. void *iterator_data)
  929. {
  930. struct assoc_array_delete_collapse_context *collapse = iterator_data;
  931. if (leaf == collapse->skip_leaf)
  932. return 0;
  933. BUG_ON(collapse->slot >= ASSOC_ARRAY_FAN_OUT);
  934. collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf);
  935. return 0;
  936. }
/**
 * assoc_array_delete - Script deletion of an object from an associative array
 * @array: The array to search.
 * @ops: The operations to use.
 * @index_key: The key to the object.
 *
 * Precalculate and preallocate a script for the deletion of an object from an
 * associative array.  This results in an edit script that can either be
 * applied or cancelled.
 *
 * The function returns a pointer to an edit script if the object was found,
 * NULL if the object was not found or -ENOMEM.
 *
 * The caller should lock against other modifications and must continue to hold
 * the lock until assoc_array_apply_edit() has been called.
 *
 * Accesses to the tree may take place concurrently with this function,
 * provided they hold the RCU read lock.
 */
struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
					    const struct assoc_array_ops *ops,
					    const void *index_key)
{
	struct assoc_array_delete_collapse_context collapse;
	struct assoc_array_walk_result result;
	struct assoc_array_node *node, *new_n0;
	struct assoc_array_edit *edit;
	struct assoc_array_ptr *ptr;
	bool has_meta;
	int slot, i;

	pr_devel("-->%s()\n", __func__);

	edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
	if (!edit)
		return ERR_PTR(-ENOMEM);
	edit->array = array;
	edit->ops = ops;
	edit->adjust_count_by = -1;

	switch (assoc_array_walk(array, ops, index_key, &result)) {
	case assoc_array_walk_found_terminal_node:
		/* We found a node that should contain the leaf we've been
		 * asked to remove - *if* it's in the tree.
		 */
		pr_devel("terminal_node\n");
		node = result.terminal_node.node;

		for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
			ptr = node->slots[slot];
			if (ptr &&
			    assoc_array_ptr_is_leaf(ptr) &&
			    ops->compare_object(assoc_array_ptr_to_leaf(ptr),
						index_key))
				goto found_leaf;
		}
		/* fall through - no matching leaf in the terminal node */
	case assoc_array_walk_tree_empty:
	case assoc_array_walk_found_wrong_shortcut:
	default:
		assoc_array_cancel_edit(edit);
		pr_devel("not found\n");
		return NULL;
	}

found_leaf:
	BUG_ON(array->nr_leaves_on_tree <= 0);

	/* In the simplest form of deletion we just clear the slot and release
	 * the leaf after a suitable interval.
	 */
	edit->dead_leaf = node->slots[slot];
	edit->set[0].ptr = &node->slots[slot];
	edit->set[0].to = NULL;
	edit->adjust_count_on = node;

	/* If that concludes erasure of the last leaf, then delete the entire
	 * internal array.
	 */
	if (array->nr_leaves_on_tree == 1) {
		edit->set[1].ptr = &array->root;
		edit->set[1].to = NULL;
		edit->adjust_count_on = NULL;
		edit->excised_subtree = array->root;
		pr_devel("all gone\n");
		return edit;
	}

	/* However, we'd also like to clear up some metadata blocks if we
	 * possibly can.
	 *
	 * We go for a simple algorithm of: if this node has FAN_OUT or fewer
	 * leaves in it, then attempt to collapse it - and attempt to
	 * recursively collapse up the tree.
	 *
	 * We could also try and collapse in partially filled subtrees to take
	 * up space in this node.
	 *
	 * (The +1 here, and the -1 in the pr_devel below, account for the
	 * dead leaf still being counted in nr_leaves_on_branch at this
	 * point.)
	 */
	if (node->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
		struct assoc_array_node *parent, *grandparent;
		struct assoc_array_ptr *ptr;

		/* First of all, we need to know if this node has metadata so
		 * that we don't try collapsing if all the leaves are already
		 * here.
		 */
		has_meta = false;
		for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
			ptr = node->slots[i];
			if (assoc_array_ptr_is_meta(ptr)) {
				has_meta = true;
				break;
			}
		}

		pr_devel("leaves: %ld [m=%d]\n",
			 node->nr_leaves_on_branch - 1, has_meta);

		/* Look further up the tree to see if we can collapse this node
		 * into a more proximal node too.
		 */
		parent = node;
	collapse_up:
		pr_devel("collapse subtree: %ld\n", parent->nr_leaves_on_branch);

		ptr = parent->back_pointer;
		if (!ptr)
			goto do_collapse;
		/* Shortcuts are transparent here - look through them to the
		 * node beyond.
		 */
		if (assoc_array_ptr_is_shortcut(ptr)) {
			struct assoc_array_shortcut *s = assoc_array_ptr_to_shortcut(ptr);
			ptr = s->back_pointer;
			if (!ptr)
				goto do_collapse;
		}

		grandparent = assoc_array_ptr_to_node(ptr);
		if (grandparent->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
			parent = grandparent;
			goto collapse_up;
		}

	do_collapse:
		/* There's no point collapsing if the original node has no meta
		 * pointers to discard and if we didn't merge into one of that
		 * node's ancestry.
		 */
		if (has_meta || parent != node) {
			node = parent;

			/* Create a new node to collapse into */
			new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
			if (!new_n0)
				goto enomem;
			edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);

			new_n0->back_pointer = node->back_pointer;
			new_n0->parent_slot = node->parent_slot;
			new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
			edit->adjust_count_on = new_n0;

			/* Gather all the surviving leaves of the subtree into
			 * the replacement node, skipping the dead leaf.
			 */
			collapse.node = new_n0;
			collapse.skip_leaf = assoc_array_ptr_to_leaf(edit->dead_leaf);
			collapse.slot = 0;
			assoc_array_subtree_iterate(assoc_array_node_to_ptr(node),
						    node->back_pointer,
						    assoc_array_delete_collapse_iterator,
						    &collapse);
			pr_devel("collapsed %d,%lu\n", collapse.slot, new_n0->nr_leaves_on_branch);
			BUG_ON(collapse.slot != new_n0->nr_leaves_on_branch - 1);

			/* Point the parent (root, node slot or shortcut) at
			 * the replacement node when the script is applied.
			 */
			if (!node->back_pointer) {
				edit->set[1].ptr = &array->root;
			} else if (assoc_array_ptr_is_leaf(node->back_pointer)) {
				BUG();
			} else if (assoc_array_ptr_is_node(node->back_pointer)) {
				struct assoc_array_node *p =
					assoc_array_ptr_to_node(node->back_pointer);
				edit->set[1].ptr = &p->slots[node->parent_slot];
			} else if (assoc_array_ptr_is_shortcut(node->back_pointer)) {
				struct assoc_array_shortcut *s =
					assoc_array_ptr_to_shortcut(node->back_pointer);
				edit->set[1].ptr = &s->next_node;
			}
			edit->set[1].to = assoc_array_node_to_ptr(new_n0);
			edit->excised_subtree = assoc_array_node_to_ptr(node);
		}
	}

	return edit;

enomem:
	/* Clean up after an out of memory error */
	pr_devel("enomem\n");
	assoc_array_cancel_edit(edit);
	return ERR_PTR(-ENOMEM);
}
  1112. /**
  1113. * assoc_array_clear - Script deletion of all objects from an associative array
  1114. * @array: The array to clear.
  1115. * @ops: The operations to use.
  1116. *
  1117. * Precalculate and preallocate a script for the deletion of all the objects
  1118. * from an associative array. This results in an edit script that can either
  1119. * be applied or cancelled.
  1120. *
  1121. * The function returns a pointer to an edit script if there are objects to be
  1122. * deleted, NULL if there are no objects in the array or -ENOMEM.
  1123. *
  1124. * The caller should lock against other modifications and must continue to hold
  1125. * the lock until assoc_array_apply_edit() has been called.
  1126. *
  1127. * Accesses to the tree may take place concurrently with this function,
  1128. * provided they hold the RCU read lock.
  1129. */
  1130. struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
  1131. const struct assoc_array_ops *ops)
  1132. {
  1133. struct assoc_array_edit *edit;
  1134. pr_devel("-->%s()\n", __func__);
  1135. if (!array->root)
  1136. return NULL;
  1137. edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
  1138. if (!edit)
  1139. return ERR_PTR(-ENOMEM);
  1140. edit->array = array;
  1141. edit->ops = ops;
  1142. edit->set[1].ptr = &array->root;
  1143. edit->set[1].to = NULL;
  1144. edit->excised_subtree = array->root;
  1145. edit->ops_for_excised_subtree = ops;
  1146. pr_devel("all gone\n");
  1147. return edit;
  1148. }
/*
 * Handle the deferred destruction after an applied edit.
 *
 * Runs as an RCU callback once a grace period has elapsed, so no reader can
 * still hold a reference to anything the edit displaced.
 */
static void assoc_array_rcu_cleanup(struct rcu_head *head)
{
	struct assoc_array_edit *edit =
		container_of(head, struct assoc_array_edit, rcu);
	int i;

	pr_devel("-->%s()\n", __func__);

	/* Release the leaf the edit displaced, if any. */
	if (edit->dead_leaf)
		edit->ops->free_object(assoc_array_ptr_to_leaf(edit->dead_leaf));

	/* Free excised metadata blocks.  The node accessor is used for both
	 * nodes and shortcuts here - presumably it just strips the type
	 * marker, which is all kfree() needs (TODO confirm against the
	 * accessor definitions).
	 */
	for (i = 0; i < ARRAY_SIZE(edit->excised_meta); i++)
		if (edit->excised_meta[i])
			kfree(assoc_array_ptr_to_node(edit->excised_meta[i]));

	if (edit->excised_subtree) {
		BUG_ON(assoc_array_ptr_is_leaf(edit->excised_subtree));
		/* Sever the back pointer before tearing the subtree down. */
		if (assoc_array_ptr_is_node(edit->excised_subtree)) {
			struct assoc_array_node *n =
				assoc_array_ptr_to_node(edit->excised_subtree);
			n->back_pointer = NULL;
		} else {
			struct assoc_array_shortcut *s =
				assoc_array_ptr_to_shortcut(edit->excised_subtree);
			s->back_pointer = NULL;
		}
		assoc_array_destroy_subtree(edit->excised_subtree,
					    edit->ops_for_excised_subtree);
	}

	kfree(edit);
}
/**
 * assoc_array_apply_edit - Apply an edit script to an associative array
 * @edit: The script to apply.
 *
 * Apply an edit script to an associative array to effect an insertion,
 * deletion or clearance.  As the edit script includes preallocated memory,
 * this is guaranteed not to fail.
 *
 * The edit script, dead objects and dead metadata will be scheduled for
 * destruction after an RCU grace period to permit those doing read-only
 * accesses on the array to continue to do so under the RCU read lock whilst
 * the edit is taking place.
 */
void assoc_array_apply_edit(struct assoc_array_edit *edit)
{
	struct assoc_array_shortcut *shortcut;
	struct assoc_array_node *node;
	struct assoc_array_ptr *ptr;
	int i;

	pr_devel("-->%s()\n", __func__);

	/* The pointer plants below are staged with write barriers so that a
	 * concurrent RCU reader never sees a later plant before the writes it
	 * depends on - do not reorder these batches.
	 */
	smp_wmb();
	if (edit->leaf_p)
		*edit->leaf_p = edit->leaf;

	smp_wmb();
	for (i = 0; i < ARRAY_SIZE(edit->set_parent_slot); i++)
		if (edit->set_parent_slot[i].p)
			*edit->set_parent_slot[i].p = edit->set_parent_slot[i].to;

	smp_wmb();
	for (i = 0; i < ARRAY_SIZE(edit->set_backpointers); i++)
		if (edit->set_backpointers[i])
			*edit->set_backpointers[i] = edit->set_backpointers_to;

	/* The final batch splices the new structure into the tree proper. */
	smp_wmb();
	for (i = 0; i < ARRAY_SIZE(edit->set); i++)
		if (edit->set[i].ptr)
			*edit->set[i].ptr = edit->set[i].to;

	/* Walk the branch counts up from the indicated node to the root,
	 * looking through any intervening shortcut.
	 */
	if (edit->array->root == NULL) {
		edit->array->nr_leaves_on_tree = 0;
	} else if (edit->adjust_count_on) {
		node = edit->adjust_count_on;
		for (;;) {
			node->nr_leaves_on_branch += edit->adjust_count_by;

			ptr = node->back_pointer;
			if (!ptr)
				break;
			if (assoc_array_ptr_is_shortcut(ptr)) {
				shortcut = assoc_array_ptr_to_shortcut(ptr);
				ptr = shortcut->back_pointer;
				if (!ptr)
					break;
			}
			BUG_ON(!assoc_array_ptr_is_node(ptr));
			node = assoc_array_ptr_to_node(ptr);
		}

		edit->array->nr_leaves_on_tree += edit->adjust_count_by;
	}

	/* Defer freeing of displaced objects and metadata until after a grace
	 * period so that existing readers can finish with them.
	 */
	call_rcu(&edit->rcu, assoc_array_rcu_cleanup);
}
  1236. /**
  1237. * assoc_array_cancel_edit - Discard an edit script.
  1238. * @edit: The script to discard.
  1239. *
  1240. * Free an edit script and all the preallocated data it holds without making
  1241. * any changes to the associative array it was intended for.
  1242. *
  1243. * NOTE! In the case of an insertion script, this does _not_ release the leaf
  1244. * that was to be inserted. That is left to the caller.
  1245. */
  1246. void assoc_array_cancel_edit(struct assoc_array_edit *edit)
  1247. {
  1248. struct assoc_array_ptr *ptr;
  1249. int i;
  1250. pr_devel("-->%s()\n", __func__);
  1251. /* Clean up after an out of memory error */
  1252. for (i = 0; i < ARRAY_SIZE(edit->new_meta); i++) {
  1253. ptr = edit->new_meta[i];
  1254. if (ptr) {
  1255. if (assoc_array_ptr_is_node(ptr))
  1256. kfree(assoc_array_ptr_to_node(ptr));
  1257. else
  1258. kfree(assoc_array_ptr_to_shortcut(ptr));
  1259. }
  1260. }
  1261. kfree(edit);
  1262. }
  1263. /**
  1264. * assoc_array_gc - Garbage collect an associative array.
  1265. * @array: The array to clean.
  1266. * @ops: The operations to use.
  1267. * @iterator: A callback function to pass judgement on each object.
  1268. * @iterator_data: Private data for the callback function.
  1269. *
  1270. * Collect garbage from an associative array and pack down the internal tree to
  1271. * save memory.
  1272. *
  1273. * The iterator function is asked to pass judgement upon each object in the
  1274. * array. If it returns false, the object is discard and if it returns true,
  1275. * the object is kept. If it returns true, it must increment the object's
  1276. * usage count (or whatever it needs to do to retain it) before returning.
  1277. *
  1278. * This function returns 0 if successful or -ENOMEM if out of memory. In the
  1279. * latter case, the array is not changed.
  1280. *
  1281. * The caller should lock against other modifications and must continue to hold
  1282. * the lock until assoc_array_apply_edit() has been called.
  1283. *
  1284. * Accesses to the tree may take place concurrently with this function,
  1285. * provided they hold the RCU read lock.
  1286. */
  1287. int assoc_array_gc(struct assoc_array *array,
  1288. const struct assoc_array_ops *ops,
  1289. bool (*iterator)(void *object, void *iterator_data),
  1290. void *iterator_data)
  1291. {
  1292. struct assoc_array_shortcut *shortcut, *new_s;
  1293. struct assoc_array_node *node, *new_n;
  1294. struct assoc_array_edit *edit;
  1295. struct assoc_array_ptr *cursor, *ptr;
  1296. struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
  1297. unsigned long nr_leaves_on_tree;
  1298. int keylen, slot, nr_free, next_slot, i;
  1299. pr_devel("-->%s()\n", __func__);
  1300. if (!array->root)
  1301. return 0;
  1302. edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
  1303. if (!edit)
  1304. return -ENOMEM;
  1305. edit->array = array;
  1306. edit->ops = ops;
  1307. edit->ops_for_excised_subtree = ops;
  1308. edit->set[0].ptr = &array->root;
  1309. edit->excised_subtree = array->root;
  1310. new_root = new_parent = NULL;
  1311. new_ptr_pp = &new_root;
  1312. cursor = array->root;
  1313. descend:
  1314. /* If this point is a shortcut, then we need to duplicate it and
  1315. * advance the target cursor.
  1316. */
  1317. if (assoc_array_ptr_is_shortcut(cursor)) {
  1318. shortcut = assoc_array_ptr_to_shortcut(cursor);
  1319. keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  1320. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  1321. new_s = kmalloc(sizeof(struct assoc_array_shortcut) +
  1322. keylen * sizeof(unsigned long), GFP_KERNEL);
  1323. if (!new_s)
  1324. goto enomem;
  1325. pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
  1326. memcpy(new_s, shortcut, (sizeof(struct assoc_array_shortcut) +
  1327. keylen * sizeof(unsigned long)));
  1328. new_s->back_pointer = new_parent;
  1329. new_s->parent_slot = shortcut->parent_slot;
  1330. *new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
  1331. new_ptr_pp = &new_s->next_node;
  1332. cursor = shortcut->next_node;
  1333. }
  1334. /* Duplicate the node at this position */
  1335. node = assoc_array_ptr_to_node(cursor);
  1336. new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  1337. if (!new_n)
  1338. goto enomem;
  1339. pr_devel("dup node %p -> %p\n", node, new_n);
  1340. new_n->back_pointer = new_parent;
  1341. new_n->parent_slot = node->parent_slot;
  1342. *new_ptr_pp = new_parent = assoc_array_node_to_ptr(new_n);
  1343. new_ptr_pp = NULL;
  1344. slot = 0;
  1345. continue_node:
  1346. /* Filter across any leaves and gc any subtrees */
  1347. for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  1348. ptr = node->slots[slot];
  1349. if (!ptr)
  1350. continue;
  1351. if (assoc_array_ptr_is_leaf(ptr)) {
  1352. if (iterator(assoc_array_ptr_to_leaf(ptr),
  1353. iterator_data))
  1354. /* The iterator will have done any reference
  1355. * counting on the object for us.
  1356. */
  1357. new_n->slots[slot] = ptr;
  1358. continue;
  1359. }
  1360. new_ptr_pp = &new_n->slots[slot];
  1361. cursor = ptr;
  1362. goto descend;
  1363. }
  1364. pr_devel("-- compress node %p --\n", new_n);
  1365. /* Count up the number of empty slots in this node and work out the
  1366. * subtree leaf count.
  1367. */
  1368. new_n->nr_leaves_on_branch = 0;
  1369. nr_free = 0;
  1370. for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  1371. ptr = new_n->slots[slot];
  1372. if (!ptr)
  1373. nr_free++;
  1374. else if (assoc_array_ptr_is_leaf(ptr))
  1375. new_n->nr_leaves_on_branch++;
  1376. }
  1377. pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
  1378. /* See what we can fold in */
  1379. next_slot = 0;
  1380. for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  1381. struct assoc_array_shortcut *s;
  1382. struct assoc_array_node *child;
  1383. ptr = new_n->slots[slot];
  1384. if (!ptr || assoc_array_ptr_is_leaf(ptr))
  1385. continue;
  1386. s = NULL;
  1387. if (assoc_array_ptr_is_shortcut(ptr)) {
  1388. s = assoc_array_ptr_to_shortcut(ptr);
  1389. ptr = s->next_node;
  1390. }
  1391. child = assoc_array_ptr_to_node(ptr);
  1392. new_n->nr_leaves_on_branch += child->nr_leaves_on_branch;
  1393. if (child->nr_leaves_on_branch <= nr_free + 1) {
  1394. /* Fold the child node into this one */
  1395. pr_devel("[%d] fold node %lu/%d [nx %d]\n",
  1396. slot, child->nr_leaves_on_branch, nr_free + 1,
  1397. next_slot);
  1398. /* We would already have reaped an intervening shortcut
  1399. * on the way back up the tree.
  1400. */
  1401. BUG_ON(s);
  1402. new_n->slots[slot] = NULL;
  1403. nr_free++;
  1404. if (slot < next_slot)
  1405. next_slot = slot;
  1406. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  1407. struct assoc_array_ptr *p = child->slots[i];
  1408. if (!p)
  1409. continue;
  1410. BUG_ON(assoc_array_ptr_is_meta(p));
  1411. while (new_n->slots[next_slot])
  1412. next_slot++;
  1413. BUG_ON(next_slot >= ASSOC_ARRAY_FAN_OUT);
  1414. new_n->slots[next_slot++] = p;
  1415. nr_free--;
  1416. }
  1417. kfree(child);
  1418. } else {
  1419. pr_devel("[%d] retain node %lu/%d [nx %d]\n",
  1420. slot, child->nr_leaves_on_branch, nr_free + 1,
  1421. next_slot);
  1422. }
  1423. }
  1424. pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
  1425. nr_leaves_on_tree = new_n->nr_leaves_on_branch;
  1426. /* Excise this node if it is singly occupied by a shortcut */
  1427. if (nr_free == ASSOC_ARRAY_FAN_OUT - 1) {
  1428. for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++)
  1429. if ((ptr = new_n->slots[slot]))
  1430. break;
  1431. if (assoc_array_ptr_is_meta(ptr) &&
  1432. assoc_array_ptr_is_shortcut(ptr)) {
  1433. pr_devel("excise node %p with 1 shortcut\n", new_n);
  1434. new_s = assoc_array_ptr_to_shortcut(ptr);
  1435. new_parent = new_n->back_pointer;
  1436. slot = new_n->parent_slot;
  1437. kfree(new_n);
  1438. if (!new_parent) {
  1439. new_s->back_pointer = NULL;
  1440. new_s->parent_slot = 0;
  1441. new_root = ptr;
  1442. goto gc_complete;
  1443. }
  1444. if (assoc_array_ptr_is_shortcut(new_parent)) {
  1445. /* We can discard any preceding shortcut also */
  1446. struct assoc_array_shortcut *s =
  1447. assoc_array_ptr_to_shortcut(new_parent);
  1448. pr_devel("excise preceding shortcut\n");
  1449. new_parent = new_s->back_pointer = s->back_pointer;
  1450. slot = new_s->parent_slot = s->parent_slot;
  1451. kfree(s);
  1452. if (!new_parent) {
  1453. new_s->back_pointer = NULL;
  1454. new_s->parent_slot = 0;
  1455. new_root = ptr;
  1456. goto gc_complete;
  1457. }
  1458. }
  1459. new_s->back_pointer = new_parent;
  1460. new_s->parent_slot = slot;
  1461. new_n = assoc_array_ptr_to_node(new_parent);
  1462. new_n->slots[slot] = ptr;
  1463. goto ascend_old_tree;
  1464. }
  1465. }
  1466. /* Excise any shortcuts we might encounter that point to nodes that
  1467. * only contain leaves.
  1468. */
  1469. ptr = new_n->back_pointer;
  1470. if (!ptr)
  1471. goto gc_complete;
  1472. if (assoc_array_ptr_is_shortcut(ptr)) {
  1473. new_s = assoc_array_ptr_to_shortcut(ptr);
  1474. new_parent = new_s->back_pointer;
  1475. slot = new_s->parent_slot;
  1476. if (new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
  1477. struct assoc_array_node *n;
  1478. pr_devel("excise shortcut\n");
  1479. new_n->back_pointer = new_parent;
  1480. new_n->parent_slot = slot;
  1481. kfree(new_s);
  1482. if (!new_parent) {
  1483. new_root = assoc_array_node_to_ptr(new_n);
  1484. goto gc_complete;
  1485. }
  1486. n = assoc_array_ptr_to_node(new_parent);
  1487. n->slots[slot] = assoc_array_node_to_ptr(new_n);
  1488. }
  1489. } else {
  1490. new_parent = ptr;
  1491. }
  1492. new_n = assoc_array_ptr_to_node(new_parent);
  1493. ascend_old_tree:
  1494. ptr = node->back_pointer;
  1495. if (assoc_array_ptr_is_shortcut(ptr)) {
  1496. shortcut = assoc_array_ptr_to_shortcut(ptr);
  1497. slot = shortcut->parent_slot;
  1498. cursor = shortcut->back_pointer;
  1499. if (!cursor)
  1500. goto gc_complete;
  1501. } else {
  1502. slot = node->parent_slot;
  1503. cursor = ptr;
  1504. }
  1505. BUG_ON(!cursor);
  1506. node = assoc_array_ptr_to_node(cursor);
  1507. slot++;
  1508. goto continue_node;
  1509. gc_complete:
  1510. edit->set[0].to = new_root;
  1511. assoc_array_apply_edit(edit);
  1512. array->nr_leaves_on_tree = nr_leaves_on_tree;
  1513. return 0;
  1514. enomem:
  1515. pr_devel("enomem\n");
  1516. assoc_array_destroy_subtree(new_root, edit->ops);
  1517. kfree(edit);
  1518. return -ENOMEM;
  1519. }