ivtv-queue.c

/*
    buffer queues.
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "ivtv-driver.h"
#include "ivtv-queue.h"

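/* Copy at most 'copybytes' bytes from userspace into 'buf', clamped to the
   space still free in the buffer.  Returns the number of bytes actually
   copied, or -EFAULT if the userspace copy fails. */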
int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes)
{
	if (s->buf_size - buf->bytesused < copybytes)
		copybytes = s->buf_size - buf->bytesused;
	if (copy_from_user(buf->buf + buf->bytesused, src, copybytes)) {
		return -EFAULT;
	}
	buf->bytesused += copybytes;
	return copybytes;
}

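/* Byte-swap the used part of the buffer, 32 bits at a time. */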
void ivtv_buf_swap(struct ivtv_buffer *buf)
{
	int i;

	for (i = 0; i < buf->bytesused; i += 4)
		swab32s((u32 *)(buf->buf + i));
}

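/* Reset a queue to the empty state: no buffers, no bytes. */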
void ivtv_queue_init(struct ivtv_queue *q)
{
	INIT_LIST_HEAD(&q->list);
	q->buffers = 0;
	q->length = 0;
	q->bytesused = 0;
}

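/* Append a buffer to the tail of a queue and update the queue counters
   under the stream's queue lock.  Buffers headed for the free queue are
   reset first. */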
void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
{
	unsigned long flags;

	/* clear the buffer if it is going to be enqueued to the free queue */
	if (q == &s->q_free) {
		buf->bytesused = 0;
		buf->readpos = 0;
		buf->b_flags = 0;
		buf->dma_xfer_cnt = 0;
	}
	spin_lock_irqsave(&s->qlock, flags);
	list_add_tail(&buf->list, &q->list);
	q->buffers++;
	q->length += s->buf_size;
	q->bytesused += buf->bytesused - buf->readpos;
	spin_unlock_irqrestore(&s->qlock, flags);
}

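/* Remove and return the buffer at the head of the queue, or NULL if the
   queue is empty. */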
struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
{
	struct ivtv_buffer *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&s->qlock, flags);
	if (!list_empty(&q->list)) {
		buf = list_entry(q->list.next, struct ivtv_buffer, list);
		list_del_init(q->list.next);
		q->buffers--;
		q->length -= s->buf_size;
		q->bytesused -= buf->bytesused - buf->readpos;
	}
	spin_unlock_irqrestore(&s->qlock, flags);
	return buf;
}

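/* Move one buffer from the head of 'from' to the tail of 'to', updating the
   counters of both queues.  The caller must hold s->qlock. */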
static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
		struct ivtv_queue *to, int clear)
{
	struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);

	list_move_tail(from->list.next, &to->list);
	from->buffers--;
	from->length -= s->buf_size;
	from->bytesused -= buf->bytesused - buf->readpos;

	/* special handling for q_free */
	if (clear)
		buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;

	to->buffers++;
	to->length += s->buf_size;
	to->bytesused += buf->bytesused - buf->readpos;
}

/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
   If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
   If 'steal' != NULL, then buffers may also be taken from that queue if
   needed, but only if 'from' is the free queue.

   The buffer is automatically cleared if it goes to the free queue. It is
   also cleared if buffers need to be taken from the 'steal' queue and
   the 'from' queue is the free queue.

   When 'from' is q_free, then needed_bytes is compared to the total
   available buffer length, otherwise needed_bytes is compared to the
   bytesused value. For the 'steal' queue the total available buffer
   length is always used.

   Returns -ENOMEM if the buffers could not be obtained, 0 if all buffers
   were obtained from the 'from' list, and the number of stolen buffers
   otherwise. */
int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,
		    struct ivtv_queue *to, int needed_bytes)
{
	unsigned long flags;
	int rc = 0;
	int from_free = from == &s->q_free;
	int to_free = to == &s->q_free;
	int bytes_available, bytes_steal;

	spin_lock_irqsave(&s->qlock, flags);
	if (needed_bytes == 0) {
		from_free = 1;
		needed_bytes = from->length;
	}

	bytes_available = from_free ? from->length : from->bytesused;
	bytes_steal = (from_free && steal) ? steal->length : 0;

	if (bytes_available + bytes_steal < needed_bytes) {
		spin_unlock_irqrestore(&s->qlock, flags);
		return -ENOMEM;
	}
	while (steal && bytes_available < needed_bytes) {
		struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		u16 dma_xfer_cnt = buf->dma_xfer_cnt;

		/* move buffers from the tail of the 'steal' queue to the tail of the
		   'from' queue. Always copy all the buffers with the same dma_xfer_cnt
		   value; this ensures that you do not end up with partial frame data
		   if one frame is stored in multiple buffers. */
		while (dma_xfer_cnt == buf->dma_xfer_cnt) {
			list_move_tail(steal->list.prev, &from->list);
			rc++;
			steal->buffers--;
			steal->length -= s->buf_size;
			steal->bytesused -= buf->bytesused - buf->readpos;
			buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
			from->buffers++;
			from->length += s->buf_size;
			bytes_available += s->buf_size;
			if (list_empty(&steal->list))
				break;
			buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		}
	}
	if (from_free) {
		u32 old_length = to->length;

		while (to->length - old_length < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, 1);
		}
	}
	else {
		u32 old_bytesused = to->bytesused;

		while (to->bytesused - old_bytesused < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, to_free);
		}
	}
	spin_unlock_irqrestore(&s->qlock, flags);
	return rc;
}

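/*
 * Illustrative sketch only (not part of the original file): one way a caller
 * might use ivtv_queue_move() to hand every byte currently queued on q_full
 * over to the pre-DMA queue before starting a transfer.  The wrapper name is
 * hypothetical; passing q_full.bytesused (rather than 0) keeps the buffers'
 * data intact, since only moves out of the free queue clear the buffers.
 *
 *	static int example_queue_full_for_dma(struct ivtv_stream *s)
 *	{
 *		return ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma,
 *				       s->q_full.bytesused);
 *	}
 */

/* Return all buffers from the I/O, full, DMA and pre-DMA queues to the
   free queue.  Passing needed_bytes == 0 moves every buffer. */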
void ivtv_flush_queues(struct ivtv_stream *s)
{
	ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
}

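/* Allocate the scatter-gather arrays and the stream buffers; all buffers
   start out on q_free.  Returns 0 on success or -ENOMEM on failure, in
   which case anything already allocated is freed again. */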
int ivtv_stream_alloc(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	int SGsize = sizeof(struct ivtv_sg_host_element) * s->buffers;
	int i;

	if (s->buffers == 0)
		return 0;

	IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
		s->dma != PCI_DMA_NONE ? "DMA " : "",
		s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);

	s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_pending == NULL) {
		IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
		return -ENOMEM;
	}
	s->sg_pending_size = 0;

	s->sg_processing = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_processing == NULL) {
		IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		return -ENOMEM;
	}
	s->sg_processing_size = 0;

	s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element),
			GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_dma == NULL) {
		IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		kfree(s->sg_processing);
		s->sg_processing = NULL;
		return -ENOMEM;
	}
	if (ivtv_might_use_dma(s)) {
		s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
				sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
		ivtv_stream_sync_for_cpu(s);
	}

	/* allocate stream buffers. Initially all buffers are in q_free. */
	for (i = 0; i < s->buffers; i++) {
		struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
						GFP_KERNEL|__GFP_NOWARN);

		if (buf == NULL)
			break;
		buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN);
		if (buf->buf == NULL) {
			kfree(buf);
			break;
		}
		INIT_LIST_HEAD(&buf->list);
		if (ivtv_might_use_dma(s)) {
			buf->dma_handle = pci_map_single(s->itv->pdev,
				buf->buf, s->buf_size + 256, s->dma);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_enqueue(s, buf, &s->q_free);
	}
	if (i == s->buffers)
		return 0;
	IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
	ivtv_stream_free(s);
	return -ENOMEM;
}

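/* Undo ivtv_stream_alloc(): return every buffer to q_free, unmap and free
   the buffers, then release the scatter-gather arrays. */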
void ivtv_stream_free(struct ivtv_stream *s)
{
	struct ivtv_buffer *buf;

	/* move all buffers to q_free */
	ivtv_flush_queues(s);

	/* empty q_free */
	while ((buf = ivtv_dequeue(s, &s->q_free))) {
		if (ivtv_might_use_dma(s))
			pci_unmap_single(s->itv->pdev, buf->dma_handle,
				s->buf_size + 256, s->dma);
		kfree(buf->buf);
		kfree(buf);
	}

	/* Free SG Array/Lists */
	if (s->sg_dma != NULL) {
		if (s->sg_handle != IVTV_DMA_UNMAPPED) {
			pci_unmap_single(s->itv->pdev, s->sg_handle,
				 sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
			s->sg_handle = IVTV_DMA_UNMAPPED;
		}
		kfree(s->sg_pending);
		kfree(s->sg_processing);
		kfree(s->sg_dma);
		s->sg_pending = NULL;
		s->sg_processing = NULL;
		s->sg_dma = NULL;
		s->sg_pending_size = 0;
		s->sg_processing_size = 0;
	}
}