/* flexcop-pci.c */
/*
 * Linux driver the digital TV devices equipped with B2C2 FlexcopII(b)/III
 * flexcop-pci.c - covers the PCI part including DMA transfers
 * see flexcop.c for copyright information
 */
#define FC_LOG_PREFIX "flexcop-pci"
#include "flexcop-common.h"

/* 1 = use the on-chip hardware PID filter (default), 0 = pass the full TS */
static int enable_pid_filtering = 1;
module_param(enable_pid_filtering, int, 0444);
MODULE_PARM_DESC(enable_pid_filtering,
	"enable hardware pid filtering: supported values: 0 (fullts), 1");

/* watchdog period in ms; runtime-writable (0644), clamped to >= 100 at use */
static int irq_chk_intv = 100;
module_param(irq_chk_intv, int, 0644);
MODULE_PARM_DESC(irq_chk_intv, "set the interval for IRQ streaming watchdog.");

#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
#define dprintk(level,args...) \
	do { if ((debug & level)) printk(args); } while (0)
#define DEBSTATUS ""
#else
#define dprintk(level,args...)
#define DEBSTATUS " (debugging is not enabled)"
#endif

/* debug categories, OR-able through the 'debug' module parameter */
#define deb_info(args...) dprintk(0x01, args)
#define deb_reg(args...) dprintk(0x02, args)
#define deb_ts(args...) dprintk(0x04, args)
#define deb_irq(args...) dprintk(0x08, args)
#define deb_chk(args...) dprintk(0x10, args)

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,
	"set debug level (1=info,2=regs,4=TS,8=irqdma,16=check (|-able))."
	DEBSTATUS);

#define DRIVER_VERSION "0.1"
#define DRIVER_NAME "flexcop-pci"
#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"
/* per-device state for one FlexCop PCI board */
struct flexcop_pci {
	struct pci_dev *pdev;

#define FC_PCI_INIT 0x01
#define FC_PCI_DMA_INIT 0x02
	int init_state;			/* FC_PCI_* flags of completed init steps */

	void __iomem *io_mem;		/* BAR0 mapping (registers) */
	u32 irq;
	/* buffersize (at least for DMA1, need to be % 188 == 0,
	 * this logic is required */
#define FC_DEFAULT_DMA1_BUFSIZE (1280 * 188)
#define FC_DEFAULT_DMA2_BUFSIZE (10 * 188)
	struct flexcop_dma dma[2];

	int active_dma1_addr;		/* 0 = addr0 of dma1; 1 = addr1 of dma1 */
	u32 last_dma1_cur_pos;
	/* position of the pointer last time the timer/packet irq occurred */
	int count;			/* number of valid IRQs handled so far */
	int count_prev;			/* 'count' snapshot at the last watchdog run */
	int stream_problem;		/* consecutive watchdog checks without IRQ progress */

	spinlock_t irq_lock;		/* held across the whole ISR body */
	unsigned long last_irq;		/* jiffies of the previous timer IRQ (debug only) */

	struct delayed_work irq_check_work;	/* self-rescheduling streaming watchdog */
	struct flexcop_device *fc_dev;
};
/* last register/value pairs seen by the wrappers below; used only to
 * suppress duplicate deb_reg output on repeated identical accesses */
static int lastwreg, lastwval, lastrreg, lastrval;

/* read a 32-bit IBI register through the memory-mapped PCI window;
 * logs via deb_reg only when the register or value changed */
static flexcop_ibi_value flexcop_pci_read_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	flexcop_ibi_value v;
	v.raw = readl(fc_pci->io_mem + r);

	if (lastrreg != r || lastrval != v.raw) {
		lastrreg = r; lastrval = v.raw;
		deb_reg("new rd: %3x: %08x\n", r, v.raw);
	}
	return v;
}
  72. static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc,
  73. flexcop_ibi_register r, flexcop_ibi_value v)
  74. {
  75. struct flexcop_pci *fc_pci = fc->bus_specific;
  76. if (lastwreg != r || lastwval != v.raw) {
  77. lastwreg = r; lastwval = v.raw;
  78. deb_reg("new wr: %3x: %08x\n", r, v.raw);
  79. }
  80. writel(v.raw, fc_pci->io_mem + r);
  81. return 0;
  82. }
/* streaming watchdog: while feeds are active, verify the ISR counter has
 * advanced since the last run; after 4 consecutive stalls, stop and restart
 * every active feed to re-program the hardware PID filter. The work always
 * reschedules itself at the end. */
static void flexcop_pci_irq_check_work(struct work_struct *work)
{
	struct flexcop_pci *fc_pci =
		container_of(work, struct flexcop_pci, irq_check_work.work);
	struct flexcop_device *fc = fc_pci->fc_dev;

	if (fc->feedcount) {
		if (fc_pci->count == fc_pci->count_prev) {
			deb_chk("no IRQ since the last check\n");
			/* post-increment: triggers on the 4th stalled check */
			if (fc_pci->stream_problem++ == 3) {
				struct dvb_demux_feed *feed;
				deb_info("flexcop-pci: stream problem, resetting pid filter\n");

				spin_lock_irq(&fc->demux.lock);
				/* turn every active feed off ... */
				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 0);
				}
				/* ... and back on again */
				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 1);
				}
				spin_unlock_irq(&fc->demux.lock);
				fc_pci->stream_problem = 0;
			}
		} else {
			/* progress seen: clear the stall counter, take a new snapshot */
			fc_pci->stream_problem = 0;
			fc_pci->count_prev = fc_pci->count;
		}
	}

	/* re-arm with a minimum period of 100 ms */
	schedule_delayed_work(&fc_pci->irq_check_work,
			msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));
}
/* When PID filtering is turned on, we use the timer IRQ, because small amounts
 * of data need to be passed to the user space instantly as well. When PID
 * filtering is turned off, we use the page-change-IRQ */
static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
{
	struct flexcop_pci *fc_pci = dev_id;
	struct flexcop_device *fc = fc_pci->fc_dev;
	unsigned long flags;
	flexcop_ibi_value v;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock_irqsave(&fc_pci->irq_lock, flags);
	/* irq_20c holds the status bits decoded below */
	v = fc->read_ibi_reg(fc, irq_20c);

	/* errors */
	if (v.irq_20c.Data_receiver_error)
		deb_chk("data receiver error\n");
	if (v.irq_20c.Continuity_error_flag)
		deb_chk("Continuity error flag is set\n");
	if (v.irq_20c.LLC_SNAP_FLAG_set)
		deb_chk("LLC_SNAP_FLAG_set is set\n");
	if (v.irq_20c.Transport_Error)
		deb_chk("Transport error\n");

	if ((fc_pci->count % 1000) == 0)
		deb_chk("%d valid irq took place so far\n", fc_pci->count);

	if (v.irq_20c.DMA1_IRQ_Status == 1) {
		/* page-change IRQ: one DMA1 sub-buffer just completed; pass
		 * that half to the demux as whole 188-byte TS packets, then
		 * flip to the other page */
		if (fc_pci->active_dma1_addr == 0)
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0,
					fc_pci->dma[0].size / 188);
		else
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr1,
					fc_pci->dma[0].size / 188);

		deb_irq("page change to page: %d\n",!fc_pci->active_dma1_addr);
		fc_pci->active_dma1_addr = !fc_pci->active_dma1_addr;
	/* for the timer IRQ we only can use buffer dmx feeding, because we don't have
	 * complete TS packets when reading from the DMA memory */
	} else if (v.irq_20c.DMA1_Timer_Status == 1) {
		/* the hardware reports the current DMA address shifted right
		 * by two bits, hence the << 2 */
		dma_addr_t cur_addr =
			fc->read_ibi_reg(fc,dma1_008).dma_0x8.dma_cur_addr << 2;
		/* byte offset into the double buffer, relative to addr0 */
		u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;

		deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ",
			jiffies_to_usecs(jiffies - fc_pci->last_irq),
			v.raw, (unsigned long long)cur_addr, cur_pos,
			fc_pci->last_dma1_cur_pos);
		fc_pci->last_irq = jiffies;

		/* buffer end was reached, restarted from the beginning
		 * pass the data from last_cur_pos to the buffer end to the demux
		 */
		if (cur_pos < fc_pci->last_dma1_cur_pos) {
			/* NOTE(review): the byte count printed here is one
			 * less than the length actually passed below */
			deb_irq(" end was reached: passing %d bytes ",
				(fc_pci->dma[0].size*2 - 1) -
				fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
					(fc_pci->dma[0].size*2) -
					fc_pci->last_dma1_cur_pos);
			fc_pci->last_dma1_cur_pos = 0;
		}

		/* pass the data accumulated since the last timer IRQ */
		if (cur_pos > fc_pci->last_dma1_cur_pos) {
			deb_irq(" passing %d bytes ",
				cur_pos - fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
					cur_pos - fc_pci->last_dma1_cur_pos);
		}
		deb_irq("\n");

		fc_pci->last_dma1_cur_pos = cur_pos;
		fc_pci->count++;
	} else {
		/* shared IRQ line: not ours this time */
		deb_irq("isr for flexcop called, apparently without reason (%08x)\n",
			v.raw);
		ret = IRQ_NONE;
	}
	spin_unlock_irqrestore(&fc_pci->irq_lock, flags);
	return ret;
}
/* flexcop core callback to start/stop TS streaming.
 * on: configure both DMA channels, enable DMA1 transfers on both
 * sub-buffers, then enable the DMA1 timer IRQ; off: tear down in the
 * reverse order (IRQ first, then transfers). Always returns 0. */
static int flexcop_pci_stream_control(struct flexcop_device *fc, int onoff)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	if (onoff) {
		flexcop_dma_config(fc, &fc_pci->dma[0], FC_DMA_1);
		flexcop_dma_config(fc, &fc_pci->dma[1], FC_DMA_2);
		flexcop_dma_config_timer(fc, FC_DMA_1, 0);

		flexcop_dma_xfer_control(fc, FC_DMA_1,
				FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 1);
		deb_irq("DMA xfer enabled\n");

		/* fresh stream: restart position tracking from the start */
		fc_pci->last_dma1_cur_pos = 0;
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 1);
		deb_irq("IRQ enabled\n");
		/* baseline for the streaming watchdog */
		fc_pci->count_prev = fc_pci->count;
	} else {
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 0);
		deb_irq("IRQ disabled\n");

		flexcop_dma_xfer_control(fc, FC_DMA_1,
				FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 0);
		deb_irq("DMA xfer disabled\n");
	}
	return 0;
}
  215. static int flexcop_pci_dma_init(struct flexcop_pci *fc_pci)
  216. {
  217. int ret;
  218. ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[0],
  219. FC_DEFAULT_DMA1_BUFSIZE);
  220. if (ret != 0)
  221. return ret;
  222. ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[1],
  223. FC_DEFAULT_DMA2_BUFSIZE);
  224. if (ret != 0) {
  225. flexcop_dma_free(&fc_pci->dma[0]);
  226. return ret;
  227. }
  228. flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_MEDIA |
  229. FC_SRAM_DEST_NET, FC_SRAM_DEST_TARGET_DMA1);
  230. flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_CAO |
  231. FC_SRAM_DEST_CAI, FC_SRAM_DEST_TARGET_DMA2);
  232. fc_pci->init_state |= FC_PCI_DMA_INIT;
  233. return ret;
  234. }
  235. static void flexcop_pci_dma_exit(struct flexcop_pci *fc_pci)
  236. {
  237. if (fc_pci->init_state & FC_PCI_DMA_INIT) {
  238. flexcop_dma_free(&fc_pci->dma[0]);
  239. flexcop_dma_free(&fc_pci->dma[1]);
  240. }
  241. fc_pci->init_state &= ~FC_PCI_DMA_INIT;
  242. }
  243. static int flexcop_pci_init(struct flexcop_pci *fc_pci)
  244. {
  245. int ret;
  246. info("card revision %x", fc_pci->pdev->revision);
  247. if ((ret = pci_enable_device(fc_pci->pdev)) != 0)
  248. return ret;
  249. pci_set_master(fc_pci->pdev);
  250. if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0)
  251. goto err_pci_disable_device;
  252. fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800);
  253. if (!fc_pci->io_mem) {
  254. err("cannot map io memory\n");
  255. ret = -EIO;
  256. goto err_pci_release_regions;
  257. }
  258. pci_set_drvdata(fc_pci->pdev, fc_pci);
  259. spin_lock_init(&fc_pci->irq_lock);
  260. if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
  261. IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
  262. goto err_pci_iounmap;
  263. fc_pci->init_state |= FC_PCI_INIT;
  264. return ret;
  265. err_pci_iounmap:
  266. pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
  267. err_pci_release_regions:
  268. pci_release_regions(fc_pci->pdev);
  269. err_pci_disable_device:
  270. pci_disable_device(fc_pci->pdev);
  271. return ret;
  272. }
  273. static void flexcop_pci_exit(struct flexcop_pci *fc_pci)
  274. {
  275. if (fc_pci->init_state & FC_PCI_INIT) {
  276. free_irq(fc_pci->pdev->irq, fc_pci);
  277. pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
  278. pci_release_regions(fc_pci->pdev);
  279. pci_disable_device(fc_pci->pdev);
  280. }
  281. fc_pci->init_state &= ~FC_PCI_INIT;
  282. }
  283. static int flexcop_pci_probe(struct pci_dev *pdev,
  284. const struct pci_device_id *ent)
  285. {
  286. struct flexcop_device *fc;
  287. struct flexcop_pci *fc_pci;
  288. int ret = -ENOMEM;
  289. if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_pci))) == NULL) {
  290. err("out of memory\n");
  291. return -ENOMEM;
  292. }
  293. /* general flexcop init */
  294. fc_pci = fc->bus_specific;
  295. fc_pci->fc_dev = fc;
  296. fc->read_ibi_reg = flexcop_pci_read_ibi_reg;
  297. fc->write_ibi_reg = flexcop_pci_write_ibi_reg;
  298. fc->i2c_request = flexcop_i2c_request;
  299. fc->get_mac_addr = flexcop_eeprom_check_mac_addr;
  300. fc->stream_control = flexcop_pci_stream_control;
  301. if (enable_pid_filtering)
  302. info("will use the HW PID filter.");
  303. else
  304. info("will pass the complete TS to the demuxer.");
  305. fc->pid_filtering = enable_pid_filtering;
  306. fc->bus_type = FC_PCI;
  307. fc->dev = &pdev->dev;
  308. fc->owner = THIS_MODULE;
  309. /* bus specific part */
  310. fc_pci->pdev = pdev;
  311. if ((ret = flexcop_pci_init(fc_pci)) != 0)
  312. goto err_kfree;
  313. /* init flexcop */
  314. if ((ret = flexcop_device_initialize(fc)) != 0)
  315. goto err_pci_exit;
  316. /* init dma */
  317. if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
  318. goto err_fc_exit;
  319. INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
  320. if (irq_chk_intv > 0)
  321. schedule_delayed_work(&fc_pci->irq_check_work,
  322. msecs_to_jiffies(irq_chk_intv < 100 ?
  323. 100 :
  324. irq_chk_intv));
  325. return ret;
  326. err_fc_exit:
  327. flexcop_device_exit(fc);
  328. err_pci_exit:
  329. flexcop_pci_exit(fc_pci);
  330. err_kfree:
  331. flexcop_device_kfree(fc);
  332. return ret;
  333. }
  334. /* in theory every _exit function should be called exactly two times,
  335. * here and in the bail-out-part of the _init-function
  336. */
  337. static void flexcop_pci_remove(struct pci_dev *pdev)
  338. {
  339. struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);
  340. if (irq_chk_intv > 0)
  341. cancel_delayed_work(&fc_pci->irq_check_work);
  342. flexcop_pci_dma_exit(fc_pci);
  343. flexcop_device_exit(fc_pci->fc_dev);
  344. flexcop_pci_exit(fc_pci);
  345. flexcop_device_kfree(fc_pci->fc_dev);
  346. }
/* PCI IDs handled by this driver (B2C2 vendor 0x13d0, device 0x2103) */
static const struct pci_device_id flexcop_pci_tbl[] = {
	{ PCI_DEVICE(0x13d0, 0x2103) },
	{ },	/* terminator */
};
MODULE_DEVICE_TABLE(pci, flexcop_pci_tbl);

static struct pci_driver flexcop_pci_driver = {
	.name = "b2c2_flexcop_pci",
	.id_table = flexcop_pci_tbl,
	.probe = flexcop_pci_probe,
	.remove = flexcop_pci_remove,
};
/* generates module init/exit that register/unregister the pci_driver */
module_pci_driver(flexcop_pci_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_NAME);
MODULE_LICENSE("GPL");