/*********************************************************************
 *
 * Filename:      irqueue.c
 * Version:       0.3
 * Description:   General queue implementation
 * Status:        Experimental.
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Tue Jun 9 13:29:31 1998
 * Modified at:   Sun Dec 12 13:48:22 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 * Modified at:   Thu Jan 4 14:29:10 CET 2001
 * Modified by:   Marc Zyngier <mzyngier@freesurf.fr>
 *
 *     Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
 *     Copyright (C) 1998, Dag Brattli,
 *     All Rights Reserved.
 *
 *     This code is taken from the Vortex Operating System written by Aage
 *     Kvalnes. Aage has agreed that this code can use the GPL licence,
 *     although he does not use that licence in his own code.
 *
 *     This copyright does however _not_ include the ELF hash() function,
 *     for which I currently don't know the licence or copyright. Please
 *     inform me if you know.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/
/*
 * NOTE :
 * There are various problems with this package :
 *	o the hash function for ints is pathetic (but could be changed)
 *	o locking is sometimes suspicious (especially during enumeration)
 *	o most users have only a few elements (== overhead)
 *	o most users never use search, so don't benefit from hashing
 * Problems already fixed :
 *	o not 64 bit compliant (most users do hashv = (int) self)
 *	o hashbin_remove() is broken => use hashbin_remove_this()
 * I think most users would be better served by a simple linked list
 * (like include/linux/list.h) with a global spinlock per list; a sketch
 * of that alternative follows this comment.
 * Jean II
 */
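/*
 * For reference, a minimal sketch of the simpler alternative suggested
 * above: a plain list from include/linux/list.h guarded by one global
 * spinlock. The names 'foo_list', 'foo_lock' and 'struct foo' are
 * hypothetical, for illustration only, and are not used in this file.
 *
 *	static LIST_HEAD(foo_list);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	struct foo {
 *		struct list_head node;
 *		int value;
 *	};
 *
 *	static void foo_add(struct foo *foo)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&foo_lock, flags);
 *		list_add(&foo->node, &foo_list);
 *		spin_unlock_irqrestore(&foo_lock, flags);
 *	}
 */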
/*
 * Notes on the concurrent access to hashbin and other SMP issues
 * -------------------------------------------------------------
 *	Hashbins are very often in the IrDA stack a global repository of
 * information, and are therefore used in a very asynchronous manner following
 * various events (driver calls, timers, user calls...).
 *	Therefore, it is very often highly important to consider the
 * management of concurrent access to the hashbin and how to guarantee the
 * consistency of the operations on it.
 *
 *	First, we need to define the objective of locking :
 *		1) Protect user data (content pointed to by the hashbin)
 *		2) Protect hashbin structure itself (linked list in each bin)
 *
 *			OLD LOCKING
 *			-----------
 *
 *	The previous locking strategies, HB_LOCAL and HB_GLOBAL, were
 * both inadequate in *both* aspects.
 *		o HB_GLOBAL was using a spinlock for each bin (local locking).
 *		o HB_LOCAL was disabling irq on *all* CPUs, so it used a single
 *		  global semaphore.
 *	The problems were :
 *		A) Global irq disabling is no longer supported by the kernel
 *		B) No protection for the hashbin struct global data
 *			o hashbin_delete()
 *			o hb_current
 *		C) No protection for user data in some cases
 *
 *	A) HB_LOCAL uses global irq disabling, so it doesn't work on kernel
 * 2.5.X. Even where it is supported (kernel 2.4.X and earlier), its
 * performance is not satisfactory on SMP setups. Most hashbins were
 * HB_LOCAL, so (A) definitely needs fixing.
 *	B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
 * locks only the individual bins, it will never be able to lock the
 * global data, so it can't do (B).
 *	C) Some functions return a pointer to data that is still in the
 * hashbin :
 *		o hashbin_find()
 *		o hashbin_get_first()
 *		o hashbin_get_next()
 *	As the data is still in the hashbin, it may be changed or freed
 * while the caller is examining the data. In those cases, locking can't
 * be done within the hashbin, but must include use of the data within
 * the caller.
 *	The caller can easily do this with HB_LOCAL (just disable irqs).
 * However, this is impossible with HB_GLOBAL because the caller has no
 * way to know the proper bin, so it doesn't know which spinlock to use.
 *
 *	Quick summary : we can no longer use HB_LOCAL, and HB_GLOBAL is
 * fundamentally broken and will never work.
 *
 *			NEW LOCKING
 *			-----------
 *
 *	To fix those problems, I've introduced a few changes in the
 * hashbin locking :
 *		1) New HB_LOCK scheme
 *		2) hashbin->hb_spinlock
 *		3) New hashbin usage policy
 *
 * HB_LOCK :
 * -------
 *	HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
 * and HB_GLOBAL. It uses a single spinlock to protect the whole content
 * of the hashbin. As it is a single spinlock, it can protect the global
 * data of the hashbin and not only the bins themselves.
 *	HB_LOCK can only protect some of the hashbin calls, so it only locks
 * calls that can be made 100% safe and leaves other calls unprotected.
 *	HB_LOCK is in theory slower than HB_GLOBAL, but as the hashbin
 * content is always small, contention is not high, so it doesn't matter
 * much. HB_LOCK is probably faster than HB_LOCAL.
 *
 * hashbin->hb_spinlock :
 * --------------------
 *	The spinlock that HB_LOCK uses is available to the caller, so that
 * the caller can protect unprotected calls (see below).
 *	If the caller wants to do its own locking entirely (HB_NOLOCK), it
 * can do so and may safely use this spinlock.
 *	Locking is done like this :
 *		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *	Releasing the lock :
 *		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *
 * Safe & Protected calls :
 * ----------------------
 *	The following calls are safe or protected via HB_LOCK :
 *		o hashbin_new()		-> safe
 *		o hashbin_delete()
 *		o hashbin_insert()
 *		o hashbin_remove_first()
 *		o hashbin_remove()
 *		o hashbin_remove_this()
 *		o HASHBIN_GET_SIZE()	-> atomic
 *
 *	The following calls only protect the hashbin itself :
 *		o hashbin_lock_find()
 *		o hashbin_find_next()
 *
 * Unprotected calls :
 * -----------------
 *	The following calls need to be protected by the caller :
 *		o hashbin_find()
 *		o hashbin_get_first()
 *		o hashbin_get_next()
 *
 * Locking Policy :
 * --------------
 *	If the hashbin is used only in a single thread of execution
 * (explicitly or implicitly), you can use HB_NOLOCK.
 *	If the calling module already provides concurrent access protection,
 * you may use HB_NOLOCK.
 *
 *	In all other cases, you need to use HB_LOCK and lock the hashbin
 * every time before calling one of the unprotected calls. You also must
 * use the pointer returned by the unprotected call within the locked
 * region.
 *
 * Extra care for enumeration :
 * --------------------------
 *	hashbin_get_first() and hashbin_get_next() use the hashbin to
 * store the current position, in hb_current.
 *	As long as the hashbin remains locked, this is safe. If you unlock
 * the hashbin, the current position may change if anybody else modifies
 * or enumerates the hashbin.
 *	Summary : do the full enumeration while locked (see the example
 * sketch after this comment).
 *
 *	Alternatively, you may use hashbin_find_next(). But this will
 * be slower, is more complex to use and doesn't protect the hashbin
 * content. So, care is needed here as well.
 *
 * Other issues :
 * ------------
 *	I believe that we are overdoing it by using spin_lock_irqsave()
 * and we should use only spin_lock_bh() or similar. But I don't have
 * the balls to try it out.
 *	Don't believe that because hashbins are now (somewhat) SMP safe
 * the rest of the code is. Higher layers tend to be safest,
 * but LAP and LMP would need some serious dedicated love.
 *
 *	Jean II
 */
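/*
 * A minimal enumeration sketch for the HB_LOCK policy above: take
 * hb_spinlock once and hold it for the whole walk, since hb_current is
 * only stable while the hashbin stays locked. 'hashbin' stands for any
 * HB_LOCK hashbin owned by the caller; the loop body is left to the
 * caller and shown here for illustration only.
 *
 *	unsigned long flags;
 *	irda_queue_t *entry;
 *
 *	spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *	for (entry = hashbin_get_first(hashbin); entry;
 *	     entry = hashbin_get_next(hashbin)) {
 *		(examine or update the entry here, without unlocking)
 *	}
 *	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 */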
#include <linux/module.h>
#include <linux/slab.h>

#include <net/irda/irda.h>
#include <net/irda/irqueue.h>

/************************ QUEUE SUBROUTINES ************************/

/*
 * Hashbin
 */
#define GET_HASHBIN(x) ( x & HASHBIN_MASK )
/*
 * Function hash (name)
 *
 *    This function hashes the input string 'name', using the ELF hash
 *    function for strings.
 */
static __u32 hash( const char* name)
{
	__u32 h = 0;
	__u32 g;

	while(*name) {
		h = (h<<4) + *name++;
		if ((g = (h & 0xf0000000)))
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}
/*
 * Function enqueue_first (queue, proc)
 *
 *    Insert item first in queue.
 *
 */
static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
{
	IRDA_DEBUG( 4, "%s()\n", __func__);

	/*
	 * Check if queue is empty.
	 */
	if ( *queue == NULL ) {
		/*
		 * Queue is empty.  Insert one element into the queue.
		 */
		element->q_next = element->q_prev = *queue = element;
	} else {
		/*
		 * Queue is not empty.  Insert element into front of queue.
		 */
		element->q_next          = (*queue);
		(*queue)->q_prev->q_next = element;
		element->q_prev          = (*queue)->q_prev;
		(*queue)->q_prev         = element;
		(*queue)                 = element;
	}
}
/*
 * Function dequeue_first (queue)
 *
 *    Remove first entry in queue
 *
 */
static irda_queue_t *dequeue_first(irda_queue_t **queue)
{
	irda_queue_t *ret;

	IRDA_DEBUG( 4, "dequeue_first()\n");

	/*
	 * Set return value
	 */
	ret = *queue;

	if ( *queue == NULL ) {
		/*
		 * Queue was empty.
		 */
	} else if ( (*queue)->q_next == *queue ) {
		/*
		 * Queue only contained a single element. It will now be
		 * empty.
		 */
		*queue = NULL;
	} else {
		/*
		 * Queue contained several elements.  Remove the first one.
		 */
		(*queue)->q_prev->q_next = (*queue)->q_next;
		(*queue)->q_next->q_prev = (*queue)->q_prev;
		*queue = (*queue)->q_next;
	}

	/*
	 * Return the removed entry (or NULL if queue was empty).
	 */
	return ret;
}
/*
 * Function dequeue_general (queue, element)
 *
 *    Remove a specific entry from the queue
 *
 */
static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
{
	irda_queue_t *ret;

	IRDA_DEBUG( 4, "dequeue_general()\n");

	/*
	 * Set return value
	 */
	ret = *queue;

	if ( *queue == NULL ) {
		/*
		 * Queue was empty.
		 */
	} else if ( (*queue)->q_next == *queue ) {
		/*
		 * Queue only contained a single element. It will now be
		 * empty.
		 */
		*queue = NULL;
	} else {
		/*
		 * Remove specific element.
		 */
		element->q_prev->q_next = element->q_next;
		element->q_next->q_prev = element->q_prev;
		if ( (*queue) == element)
			(*queue) = element->q_next;
	}

	/*
	 * Return the removed entry (or NULL if queue was empty).
	 */
	return ret;
}
/************************ HASHBIN MANAGEMENT ************************/

/*
 * Function hashbin_new (type)
 *
 *    Create hashbin!
 *
 */
hashbin_t *hashbin_new(int type)
{
	hashbin_t* hashbin;

	/*
	 * Allocate new hashbin
	 */
	hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC);
	if (!hashbin)
		return NULL;

	/*
	 * Initialize structure
	 */
	hashbin->hb_type = type;
	hashbin->magic = HB_MAGIC;
	//hashbin->hb_current = NULL;

	/* Make sure all spinlock's are unlocked */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_init(&hashbin->hb_spinlock);
	}

	return hashbin;
}
EXPORT_SYMBOL(hashbin_new);
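/*
 * A minimal lifecycle sketch for the hashbin API, assuming a hypothetical
 * caller-side 'struct foo' that embeds irda_queue_t as its first member,
 * as hashbin users are expected to do. This is illustration only, not
 * code used by this file.
 *
 *	struct foo {
 *		irda_queue_t q;		(must be the first member)
 *		int value;
 *	};
 *
 *	hashbin_t *hb = hashbin_new(HB_LOCK);
 *	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *	if (hb && foo)
 *		hashbin_insert(hb, (irda_queue_t *) foo, (long) foo, NULL);
 *
 *	(... later, tear everything down, freeing each entry with kfree ...)
 *	hashbin_delete(hb, (FREE_FUNC) kfree);
 */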
/*
 * Function hashbin_delete (hashbin, free_func)
 *
 *    Destroy hashbin, the free_func can be a user supplied special routine
 *    for deallocating this structure if it's complex. If not the user can
 *    just supply kfree, which should take care of the job.
 */
#ifdef CONFIG_LOCKDEP
static int hashbin_lock_depth = 0;
#endif
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
	irda_queue_t* queue;
	unsigned long flags = 0;
	int i;

	IRDA_ASSERT(hashbin != NULL, return -1;);
	IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
					 hashbin_lock_depth++);
	}

	/*
	 *  Free the entries in the hashbin, TODO: use hashbin_clear when
	 *  it has been shown to work
	 */
	for (i = 0; i < HASHBIN_SIZE; i ++ ) {
		queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
		while (queue ) {
			if (free_func)
				(*free_func)(queue);
			queue = dequeue_first(
				(irda_queue_t**) &hashbin->hb_queue[i]);
		}
	}

	/* Cleanup local data */
	hashbin->hb_current = NULL;
	hashbin->magic = ~HB_MAGIC;

	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
#ifdef CONFIG_LOCKDEP
		hashbin_lock_depth--;
#endif
	}

	/*
	 *  Free the hashbin structure
	 */
	kfree(hashbin);

	return 0;
}
EXPORT_SYMBOL(hashbin_delete);
/********************* HASHBIN LIST OPERATIONS *********************/

/*
 * Function hashbin_insert (hashbin, entry, hashv, name)
 *
 *    Insert an entry into the hashbin
 *
 */
void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
		    const char* name)
{
	unsigned long flags = 0;
	int bin;

	IRDA_DEBUG( 4, "%s()\n", __func__);

	IRDA_ASSERT( hashbin != NULL, return;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);

	/*
	 * Locate hashbin
	 */
	if ( name )
		hashv = hash( name );
	bin = GET_HASHBIN( hashv );

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/*
	 * Store name and key
	 */
	entry->q_hash = hashv;
	if ( name )
		strlcpy( entry->q_name, name, sizeof(entry->q_name));

	/*
	 * Insert new entry first
	 */
	enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ],
		       entry);
	hashbin->hb_size++;

	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */
}
EXPORT_SYMBOL(hashbin_insert);
/*
 *  Function hashbin_remove_first (hashbin)
 *
 *    Remove first entry of the hashbin
 *
 * Note : this function no longer uses hashbin_remove(), but does things
 * similar to hashbin_remove_this(), so it can be considered safe.
 * Jean II
 */
void *hashbin_remove_first( hashbin_t *hashbin)
{
	unsigned long flags = 0;
	irda_queue_t *entry = NULL;

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	entry = hashbin_get_first( hashbin);
	if ( entry != NULL) {
		int	bin;
		long	hashv;
		/*
		 * Locate hashbin
		 */
		hashv = entry->q_hash;
		bin = GET_HASHBIN( hashv );

		/*
		 * Dequeue the entry...
		 */
		dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
				 (irda_queue_t*) entry );
		hashbin->hb_size--;
		entry->q_next = NULL;
		entry->q_prev = NULL;

		/*
		 *  Check if this item is the currently selected item, and in
		 *  that case we must reset hb_current
		 */
		if ( entry == hashbin->hb_current)
			hashbin->hb_current = NULL;
	}

	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	return entry;
}
/*
 *  Function hashbin_remove (hashbin, hashv, name)
 *
 *    Remove entry with the given name
 *
 * The use of this function is highly discouraged, because the whole
 * concept behind hashbin_remove() is broken. In many cases, it's not
 * possible to guarantee the unicity of the index (either hashv or name),
 * leading to removing the WRONG entry.
 * The only simple safe use is :
 *		hashbin_remove(hashbin, (int) self, NULL);
 * In other cases, you must think hard to guarantee unicity of the index.
 * Jean II
 */
void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
{
	int bin, found = FALSE;
	unsigned long flags = 0;
	irda_queue_t* entry;

	IRDA_DEBUG( 4, "%s()\n", __func__);

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	/*
	 * Locate hashbin
	 */
	if ( name )
		hashv = hash( name );
	bin = GET_HASHBIN( hashv );

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/*
	 * Search for entry
	 */
	entry = hashbin->hb_queue[ bin ];
	if ( entry ) {
		do {
			/*
			 * Check for key
			 */
			if ( entry->q_hash == hashv ) {
				/*
				 * Name compare too?
				 */
				if ( name ) {
					if ( strcmp( entry->q_name, name) == 0)
					{
						found = TRUE;
						break;
					}
				} else {
					found = TRUE;
					break;
				}
			}
			entry = entry->q_next;
		} while ( entry != hashbin->hb_queue[ bin ] );
	}

	/*
	 * If entry was found, dequeue it
	 */
	if ( found ) {
		dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
				 (irda_queue_t*) entry );
		hashbin->hb_size--;

		/*
		 *  Check if this item is the currently selected item, and in
		 *  that case we must reset hb_current
		 */
		if ( entry == hashbin->hb_current)
			hashbin->hb_current = NULL;
	}

	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/* Return */
	if ( found )
		return entry;
	else
		return NULL;
}
EXPORT_SYMBOL(hashbin_remove);
/*
 *  Function hashbin_remove_this (hashbin, entry)
 *
 *    Remove the given entry from the hashbin
 *
 * In some cases, the user of hashbin can't guarantee the unicity
 * of either the hashv or name.
 * In those cases, using the above function is guaranteed to cause troubles,
 * so we use this one instead...
 * And by the way, it's also faster, because we skip the search phase ;-)
 */
void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
{
	unsigned long flags = 0;
	int	bin;
	long	hashv;

	IRDA_DEBUG( 4, "%s()\n", __func__);

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
	IRDA_ASSERT( entry != NULL, return NULL;);

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/* Check if valid and not already removed... */
	if((entry->q_next == NULL) || (entry->q_prev == NULL)) {
		entry = NULL;
		goto out;
	}

	/*
	 * Locate hashbin
	 */
	hashv = entry->q_hash;
	bin = GET_HASHBIN( hashv );

	/*
	 * Dequeue the entry...
	 */
	dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
			 (irda_queue_t*) entry );
	hashbin->hb_size--;
	entry->q_next = NULL;
	entry->q_prev = NULL;

	/*
	 *  Check if this item is the currently selected item, and in
	 *  that case we must reset hb_current
	 */
	if ( entry == hashbin->hb_current)
		hashbin->hb_current = NULL;
out:
	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	return entry;
}
EXPORT_SYMBOL(hashbin_remove_this);
/*********************** HASHBIN ENUMERATION ***********************/

/*
 * Function hashbin_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 */
void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
{
	int bin;
	irda_queue_t* entry;

	IRDA_DEBUG( 4, "hashbin_find()\n");

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	/*
	 * Locate hashbin
	 */
	if ( name )
		hashv = hash( name );
	bin = GET_HASHBIN( hashv );

	/*
	 * Search for entry
	 */
	entry = hashbin->hb_queue[ bin];
	if ( entry ) {
		do {
			/*
			 * Check for key
			 */
			if ( entry->q_hash == hashv ) {
				/*
				 * Name compare too?
				 */
				if ( name ) {
					if ( strcmp( entry->q_name, name ) == 0 ) {
						return entry;
					}
				} else {
					return entry;
				}
			}
			entry = entry->q_next;
		} while ( entry != hashbin->hb_queue[ bin ] );
	}

	return NULL;
}
EXPORT_SYMBOL(hashbin_find);
/*
 * Function hashbin_lock_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 * Same, but with spinlock protection...
 * I call it safe, but it's only safe with respect to the hashbin, not its
 * content. - Jean II
 */
void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name )
{
	unsigned long flags = 0;
	irda_queue_t* entry;

	/* Synchronize */
	spin_lock_irqsave(&hashbin->hb_spinlock, flags);

	/*
	 * Search for entry
	 */
	entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );

	/* Release lock */
	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

	return entry;
}
EXPORT_SYMBOL(hashbin_lock_find);
/*
 * Function hashbin_find_next (hashbin, hashv, name, pnext)
 *
 *    Find an item with the given hashv or name, and its successor
 *
 * This function allows concurrent enumerations to be done without the
 * need to lock over the whole session, because the caller keeps the
 * context of the search. On the other hand, it might fail and return
 * NULL if the entry is removed. - Jean II
 */
void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
			 void ** pnext)
{
	unsigned long flags = 0;
	irda_queue_t* entry;

	/* Synchronize */
	spin_lock_irqsave(&hashbin->hb_spinlock, flags);

	/*
	 * Search for current entry
	 * This allows us to check if the current item is still in the
	 * hashbin or has been removed.
	 */
	entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );

	/*
	 * Trick hashbin_get_next() to return what we want
	 */
	if(entry) {
		hashbin->hb_current = entry;
		*pnext = hashbin_get_next( hashbin );
	} else
		*pnext = NULL;

	/* Release lock */
	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

	return entry;
}
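/*
 * A minimal sketch of the resumable enumeration that hashbin_find_next()
 * enables, under the assumption that entries are keyed by their q_hash
 * value (name == NULL). The caller still needs its own protection for the
 * entry contents; only the hashbin walk itself is covered. Illustration
 * only, not code used by this file.
 *
 *	irda_queue_t *entry, *next;
 *
 *	entry = hashbin_get_first(hashbin);	(done under hb_spinlock)
 *	while (entry) {
 *		(work on 'entry' with the caller's own protection)
 *
 *		(re-find 'entry' by key and fetch its successor atomically)
 *		if (hashbin_find_next(hashbin, entry->q_hash, NULL,
 *				      (void **) &next) == NULL)
 *			break;		(entry was removed; stop or restart)
 *		entry = next;
 *	}
 */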
/*
 * Function hashbin_get_first (hashbin)
 *
 *    Get a pointer to first element in hashbin, this function must be
 *    called before any calls to hashbin_get_next()!
 *
 */
irda_queue_t *hashbin_get_first( hashbin_t* hashbin)
{
	irda_queue_t *entry;
	int i;

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	if ( hashbin == NULL)
		return NULL;

	for ( i = 0; i < HASHBIN_SIZE; i ++ ) {
		entry = hashbin->hb_queue[ i];
		if ( entry) {
			hashbin->hb_current = entry;
			return entry;
		}
	}
	/*
	 * Did not find any item in hashbin
	 */
	return NULL;
}
EXPORT_SYMBOL(hashbin_get_first);
/*
 * Function hashbin_get_next (hashbin)
 *
 *    Get next item in hashbin. A series of hashbin_get_next() calls must
 *    be started by a call to hashbin_get_first(). The function returns
 *    NULL when all items have been traversed
 *
 * The context of the search is stored within the hashbin, so you must
 * protect yourself from concurrent enumerations. - Jean II
 */
irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
{
	irda_queue_t* entry;
	int bin;
	int i;

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	if ( hashbin->hb_current == NULL) {
		IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;);
		return NULL;
	}
	entry = hashbin->hb_current->q_next;
	bin = GET_HASHBIN( entry->q_hash);

	/*
	 *  Make sure that we are not back at the beginning of the queue
	 *  again
	 */
	if ( entry != hashbin->hb_queue[ bin ]) {
		hashbin->hb_current = entry;

		return entry;
	}

	/*
	 *  Check that this is not the last queue in hashbin
	 */
	if ( bin >= HASHBIN_SIZE)
		return NULL;

	/*
	 *  Move to next queue in hashbin
	 */
	bin++;
	for ( i = bin; i < HASHBIN_SIZE; i++ ) {
		entry = hashbin->hb_queue[ i];
		if ( entry) {
			hashbin->hb_current = entry;

			return entry;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(hashbin_get_next);