seq_fifo.c

/*
 * ALSA sequencer FIFO
 * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <sound/core.h>
#include <linux/slab.h>
#include "seq_fifo.h"
#include "seq_lock.h"

/* FIFO */

/* create new fifo */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
        struct snd_seq_fifo *f;

        f = kzalloc(sizeof(*f), GFP_KERNEL);
        if (f == NULL) {
                snd_printd("malloc failed for snd_seq_fifo_new() \n");
                return NULL;
        }

        f->pool = snd_seq_pool_new(poolsize);
        if (f->pool == NULL) {
                kfree(f);
                return NULL;
        }
        if (snd_seq_pool_init(f->pool) < 0) {
                snd_seq_pool_delete(&f->pool);
                kfree(f);
                return NULL;
        }

        spin_lock_init(&f->lock);
        snd_use_lock_init(&f->use_lock);
        init_waitqueue_head(&f->input_sleep);
        atomic_set(&f->overflow, 0);

        f->head = NULL;
        f->tail = NULL;
        f->cells = 0;

        return f;
}

/* delete the fifo and release all resources */
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
        struct snd_seq_fifo *f;

        if (snd_BUG_ON(!fifo))
                return;
        f = *fifo;
        if (snd_BUG_ON(!f))
                return;
        *fifo = NULL;

        snd_seq_fifo_clear(f);

        /* wake up clients if any */
        if (waitqueue_active(&f->input_sleep))
                wake_up(&f->input_sleep);

        /* release resources...*/
        /*....................*/

        if (f->pool) {
                snd_seq_pool_done(f->pool);
                snd_seq_pool_delete(&f->pool);
        }

        kfree(f);
}

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;

        /* clear overflow flag */
        atomic_set(&f->overflow, 0);

        snd_use_lock_sync(&f->use_lock);
        spin_lock_irqsave(&f->lock, flags);
        /* drain the fifo */
        while ((cell = fifo_cell_out(f)) != NULL) {
                snd_seq_cell_free(cell);
        }
        spin_unlock_irqrestore(&f->lock, flags);
}

/* enqueue event to fifo */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
                          struct snd_seq_event *event)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        int err;

        if (snd_BUG_ON(!f))
                return -EINVAL;

        snd_use_lock_use(&f->use_lock);
        err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
        if (err < 0) {
                if (err == -ENOMEM)
                        atomic_inc(&f->overflow);
                snd_use_lock_free(&f->use_lock);
                return err;
        }

        /* append new cells to fifo */
        spin_lock_irqsave(&f->lock, flags);
        if (f->tail != NULL)
                f->tail->next = cell;
        f->tail = cell;
        if (f->head == NULL)
                f->head = cell;
        f->cells++;
        spin_unlock_irqrestore(&f->lock, flags);

        /* wakeup client */
        if (waitqueue_active(&f->input_sleep))
                wake_up(&f->input_sleep);

        snd_use_lock_free(&f->use_lock);

        return 0; /* success */
}

/* dequeue cell from fifo */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
        struct snd_seq_event_cell *cell;

        if ((cell = f->head) != NULL) {
                f->head = cell->next;

                /* reset tail if this was the last element */
                if (f->tail == cell)
                        f->tail = NULL;

                cell->next = NULL;
                f->cells--;
        }

        return cell;
}

/* dequeue cell from fifo and copy it to user space */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
                          struct snd_seq_event_cell **cellp, int nonblock)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        wait_queue_t wait;

        if (snd_BUG_ON(!f))
                return -EINVAL;

        *cellp = NULL;
        init_waitqueue_entry(&wait, current);
        spin_lock_irqsave(&f->lock, flags);
        while ((cell = fifo_cell_out(f)) == NULL) {
                if (nonblock) {
                        /* non-blocking - return immediately */
                        spin_unlock_irqrestore(&f->lock, flags);
                        return -EAGAIN;
                }
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&f->input_sleep, &wait);
                spin_unlock_irq(&f->lock);
                schedule();
                spin_lock_irq(&f->lock);
                remove_wait_queue(&f->input_sleep, &wait);
                if (signal_pending(current)) {
                        spin_unlock_irqrestore(&f->lock, flags);
                        return -ERESTARTSYS;
                }
        }
        spin_unlock_irqrestore(&f->lock, flags);
        *cellp = cell;

        return 0;
}

/* put a dequeued cell back to the head of the fifo */
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
                               struct snd_seq_event_cell *cell)
{
        unsigned long flags;

        if (cell) {
                spin_lock_irqsave(&f->lock, flags);
                cell->next = f->head;
                f->head = cell;
                /* restore the tail pointer if the fifo was empty */
                if (f->tail == NULL)
                        f->tail = cell;
                f->cells++;
                spin_unlock_irqrestore(&f->lock, flags);
        }
}

/* polling; return non-zero if queue is available */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
                           poll_table *wait)
{
        poll_wait(file, &f->input_sleep, wait);
        return (f->cells > 0);
}

/* change the size of pool; all old events are removed */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
        unsigned long flags;
        struct snd_seq_pool *newpool, *oldpool;
        struct snd_seq_event_cell *cell, *next, *oldhead;

        if (snd_BUG_ON(!f || !f->pool))
                return -EINVAL;

        /* allocate new pool */
        newpool = snd_seq_pool_new(poolsize);
        if (newpool == NULL)
                return -ENOMEM;
        if (snd_seq_pool_init(newpool) < 0) {
                snd_seq_pool_delete(&newpool);
                return -ENOMEM;
        }

        spin_lock_irqsave(&f->lock, flags);
        /* remember old pool */
        oldpool = f->pool;
        oldhead = f->head;
        /* exchange pools */
        f->pool = newpool;
        f->head = NULL;
        f->tail = NULL;
        f->cells = 0;
        /* NOTE: overflow flag is not cleared */
        spin_unlock_irqrestore(&f->lock, flags);

        /* release cells in old pool */
        for (cell = oldhead; cell; cell = next) {
                next = cell->next;
                snd_seq_cell_free(cell);
        }
        snd_seq_pool_delete(&oldpool);

        return 0;
}
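
For reference, below is a minimal usage sketch of the FIFO API defined above. It is not part of the original file: the function name example_fifo_roundtrip() and the pool size of 64 are made up for illustration, and it assumes the declarations from "seq_fifo.h" and "seq_memory.h" (which provide struct snd_seq_event_cell and snd_seq_cell_free()) are available, as they are to the sequencer core itself. It shows the typical produce/consume cycle: create a fifo, enqueue an event (which copies it into a pool cell), dequeue the cell without blocking, and free it.

#include <linux/string.h>
#include <sound/seq_kernel.h>
#include "seq_fifo.h"
#include "seq_memory.h"

/* hypothetical example, not part of the ALSA sequencer sources */
static int example_fifo_roundtrip(void)
{
        struct snd_seq_fifo *f;
        struct snd_seq_event_cell *cell;
        struct snd_seq_event ev;
        int err;

        /* create a fifo backed by a pool of up to 64 cells */
        f = snd_seq_fifo_new(64);
        if (f == NULL)
                return -ENOMEM;

        /* enqueue one event; snd_seq_fifo_event_in() copies it into a cell */
        memset(&ev, 0, sizeof(ev));
        ev.type = SNDRV_SEQ_EVENT_NOTEON;
        err = snd_seq_fifo_event_in(f, &ev);
        if (err < 0)
                goto out;

        /* dequeue the cell without blocking and release it back to the pool */
        err = snd_seq_fifo_cell_out(f, &cell, 1);
        if (err < 0)
                goto out;
        snd_seq_cell_free(cell);

out:
        /* clears any remaining cells and frees the fifo and its pool */
        snd_seq_fifo_delete(&f);
        return err;
}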