/* cx88-vbi.c — VBI (vertical blanking interval) capture support for the
 * Conexant CX2388x (cx88) video capture driver.
 */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. */
  4. #include "cx88.h"
  5. #include <linux/kernel.h>
  6. #include <linux/module.h>
  7. #include <linux/init.h>
/* Module parameter: debug verbosity for VBI messages (0 = silent). */
static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");

/*
 * Level-gated debug printk; prefixes each message with the calling
 * function's name.  do { } while (0) keeps it safe in if/else bodies.
 */
#define dprintk(level, fmt, arg...) do {				\
	if (vbi_debug >= level)						\
		printk(KERN_DEBUG pr_fmt("%s: vbi:" fmt),		\
		       __func__, ##arg);				\
} while (0)
  16. /* ------------------------------------------------------------------ */
  17. int cx8800_vbi_fmt(struct file *file, void *priv,
  18. struct v4l2_format *f)
  19. {
  20. struct cx8800_dev *dev = video_drvdata(file);
  21. f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
  22. f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
  23. f->fmt.vbi.offset = 244;
  24. if (dev->core->tvnorm & V4L2_STD_525_60) {
  25. /* ntsc */
  26. f->fmt.vbi.sampling_rate = 28636363;
  27. f->fmt.vbi.start[0] = 10;
  28. f->fmt.vbi.start[1] = 273;
  29. f->fmt.vbi.count[0] = VBI_LINE_NTSC_COUNT;
  30. f->fmt.vbi.count[1] = VBI_LINE_NTSC_COUNT;
  31. } else if (dev->core->tvnorm & V4L2_STD_625_50) {
  32. /* pal */
  33. f->fmt.vbi.sampling_rate = 35468950;
  34. f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
  35. f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
  36. f->fmt.vbi.count[0] = VBI_LINE_PAL_COUNT;
  37. f->fmt.vbi.count[1] = VBI_LINE_PAL_COUNT;
  38. }
  39. return 0;
  40. }
/*
 * Program the VBI SRAM channel, capture-control and interrupt-mask
 * registers, then start RISC DMA at @buf's RISC program.  The register
 * write order below is the hardware bring-up sequence — do not reorder.
 * Always returns 0.
 */
static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
				struct cx88_dmaqueue *q,
				struct cx88_buffer *buf)
{
	struct cx88_core *core = dev->core;

	/* setup fifo + format */
	cx88_sram_channel_setup(dev->core, &cx88_sram_channels[SRAM_CH24],
				VBI_LINE_LENGTH, buf->risc.dma);

	cx_write(MO_VBOS_CONTROL, (1 << 18) |	/* comb filter delay fixup */
				  (1 << 15) |	/* enable vbi capture */
				  (1 << 11));	/* NOTE(review): bit 11 purpose
						 * not documented here — check
						 * CX2388x datasheet */

	/* reset counter */
	cx_write(MO_VBI_GPCNTRL, GP_COUNT_CONTROL_RESET);
	q->count = 0;

	/* enable irqs */
	cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT);
	cx_set(MO_VID_INTMSK, 0x0f0088);

	/* enable capture */
	cx_set(VID_CAPTURE_CONTROL, 0x18);

	/* start dma */
	cx_set(MO_DEV_CNTRL2, (1 << 5));
	cx_set(MO_VID_DMACNTRL, 0x88);

	return 0;
}
/*
 * Mirror of cx8800_start_vbi_dma(): stop the RISC DMA engine, disable
 * VBI capture, and mask the interrupts the start path enabled.
 */
void cx8800_stop_vbi_dma(struct cx8800_dev *dev)
{
	struct cx88_core *core = dev->core;

	/* stop dma */
	cx_clear(MO_VID_DMACNTRL, 0x88);

	/* disable capture */
	cx_clear(VID_CAPTURE_CONTROL, 0x18);

	/* disable irqs */
	cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
	cx_clear(MO_VID_INTMSK, 0x0f0088);
}
  76. int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
  77. struct cx88_dmaqueue *q)
  78. {
  79. struct cx88_buffer *buf;
  80. if (list_empty(&q->active))
  81. return 0;
  82. buf = list_entry(q->active.next, struct cx88_buffer, list);
  83. dprintk(2, "restart_queue [%p/%d]: restart dma\n",
  84. buf, buf->vb.vb2_buf.index);
  85. cx8800_start_vbi_dma(dev, q, buf);
  86. return 0;
  87. }
  88. /* ------------------------------------------------------------------ */
  89. static int queue_setup(struct vb2_queue *q,
  90. unsigned int *num_buffers, unsigned int *num_planes,
  91. unsigned int sizes[], struct device *alloc_devs[])
  92. {
  93. struct cx8800_dev *dev = q->drv_priv;
  94. *num_planes = 1;
  95. if (dev->core->tvnorm & V4L2_STD_525_60)
  96. sizes[0] = VBI_LINE_NTSC_COUNT * VBI_LINE_LENGTH * 2;
  97. else
  98. sizes[0] = VBI_LINE_PAL_COUNT * VBI_LINE_LENGTH * 2;
  99. return 0;
  100. }
  101. static int buffer_prepare(struct vb2_buffer *vb)
  102. {
  103. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  104. struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
  105. struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
  106. struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
  107. unsigned int lines;
  108. unsigned int size;
  109. if (dev->core->tvnorm & V4L2_STD_525_60)
  110. lines = VBI_LINE_NTSC_COUNT;
  111. else
  112. lines = VBI_LINE_PAL_COUNT;
  113. size = lines * VBI_LINE_LENGTH * 2;
  114. if (vb2_plane_size(vb, 0) < size)
  115. return -EINVAL;
  116. vb2_set_plane_payload(vb, 0, size);
  117. cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
  118. 0, VBI_LINE_LENGTH * lines,
  119. VBI_LINE_LENGTH, 0,
  120. lines);
  121. return 0;
  122. }
  123. static void buffer_finish(struct vb2_buffer *vb)
  124. {
  125. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  126. struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
  127. struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
  128. struct cx88_riscmem *risc = &buf->risc;
  129. if (risc->cpu)
  130. pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
  131. memset(risc, 0, sizeof(*risc));
  132. }
/*
 * vb2 buf_queue: link the buffer's RISC program into the active DMA
 * chain.  Every program ends in a jump instruction; the first queued
 * buffer is made to loop back to its own start, while later buffers
 * are spliced in by re-pointing the previous tail's jump at them and
 * requesting an IRQ at their first instruction.  Statement order
 * matters: the new buffer's jump targets are written before the
 * previous program is redirected to it.
 */
static void buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
	struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
	struct cx88_buffer *prev;
	struct cx88_dmaqueue *q = &dev->vbiq;

	/* add jump to start (dma + 8 skips the program's own jump slot) */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);

	if (list_empty(&q->active)) {
		/* idle queue: this buffer loops on itself */
		list_add_tail(&buf->list, &q->active);
		dprintk(2, "[%p/%d] vbi_queue - first active\n",
			buf, buf->vb.vb2_buf.index);
	} else {
		/* raise IRQ1 when DMA reaches this buffer */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(q->active.prev, struct cx88_buffer, list);
		list_add_tail(&buf->list, &q->active);
		/* redirect the previous tail's jump at the new program */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(2, "[%p/%d] buffer_queue - append to active\n",
			buf, buf->vb.vb2_buf.index);
	}
}
  157. static int start_streaming(struct vb2_queue *q, unsigned int count)
  158. {
  159. struct cx8800_dev *dev = q->drv_priv;
  160. struct cx88_dmaqueue *dmaq = &dev->vbiq;
  161. struct cx88_buffer *buf = list_entry(dmaq->active.next,
  162. struct cx88_buffer, list);
  163. cx8800_start_vbi_dma(dev, dmaq, buf);
  164. return 0;
  165. }
/*
 * vb2 stop_streaming: halt VBI capture and hand every still-queued
 * buffer back to vb2 in the ERROR state, as the vb2 contract requires.
 */
static void stop_streaming(struct vb2_queue *q)
{
	struct cx8800_dev *dev = q->drv_priv;
	struct cx88_core *core = dev->core;	/* used by cx_clear() */
	struct cx88_dmaqueue *dmaq = &dev->vbiq;
	unsigned long flags;

	/*
	 * NOTE(review): masks 0x11 / 0x06 differ from the 0x88 / 0x18
	 * used by the start/stop helpers above — presumably these hit
	 * additional FIFO/capture bits; confirm against the CX2388x
	 * register documentation.
	 */
	cx_clear(MO_VID_DMACNTRL, 0x11);
	cx_clear(VID_CAPTURE_CONTROL, 0x06);
	cx8800_stop_vbi_dma(dev);

	/* Drain the active queue under the DMA queue spinlock. */
	spin_lock_irqsave(&dev->slock, flags);
	while (!list_empty(&dmaq->active)) {
		struct cx88_buffer *buf = list_entry(dmaq->active.next,
						     struct cx88_buffer, list);

		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
/* videobuf2 queue operations for the cx8800 VBI capture queue. */
const struct vb2_ops cx8800_vbi_qops = {
	.queue_setup = queue_setup,
	.buf_prepare = buffer_prepare,
	.buf_finish = buffer_finish,
	.buf_queue = buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};