c8sectpfe-core.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * c8sectpfe-core.c - C8SECTPFE STi DVB driver
  4. *
  5. * Copyright (c) STMicroelectronics 2015
  6. *
  7. * Author:Peter Bennett <peter.bennett@st.com>
  8. * Peter Griffin <peter.griffin@linaro.org>
  9. *
  10. */
  11. #include <linux/atomic.h>
  12. #include <linux/clk.h>
  13. #include <linux/completion.h>
  14. #include <linux/delay.h>
  15. #include <linux/device.h>
  16. #include <linux/dma-mapping.h>
  17. #include <linux/dvb/dmx.h>
  18. #include <linux/dvb/frontend.h>
  19. #include <linux/errno.h>
  20. #include <linux/firmware.h>
  21. #include <linux/init.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/io.h>
  24. #include <linux/module.h>
  25. #include <linux/of_gpio.h>
  26. #include <linux/of_platform.h>
  27. #include <linux/platform_device.h>
  28. #include <linux/usb.h>
  29. #include <linux/slab.h>
  30. #include <linux/time.h>
  31. #include <linux/version.h>
  32. #include <linux/wait.h>
  33. #include <linux/pinctrl/pinctrl.h>
  34. #include "c8sectpfe-core.h"
  35. #include "c8sectpfe-common.h"
  36. #include "c8sectpfe-debugfs.h"
  37. #include <media/dmxdev.h>
  38. #include <media/dvb_demux.h>
  39. #include <media/dvb_frontend.h>
  40. #include <media/dvb_net.h>
#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
MODULE_FIRMWARE(FIRMWARE_MEMDMA);
/* Size in bytes of one channel's hw PID filter bitmap: 8192 PIDs / 8 (bit per pid) */
#define PID_TABLE_SIZE 1024
/* Interval between software-demux polls of the input channels */
#define POLL_MSECS 50
static int load_c8sectpfe_fw(struct c8sectpfei *fei);
/* MPEG-TS packet is 188 bytes; memdma adds a 4 byte header per packet */
#define TS_PKT_SIZE 188
#define HEADER_SIZE (4)
#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
/* DMA back buffer alignment required by the hw */
#define FEI_ALIGNMENT (32)
/* hw requires minimum of 8*PACKET_SIZE and padded to 8byte boundary */
#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
/* per-channel input block FIFO window length in internal sram */
#define FIFO_LEN 1024
  53. static void c8sectpfe_timer_interrupt(struct timer_list *t)
  54. {
  55. struct c8sectpfei *fei = from_timer(fei, t, timer);
  56. struct channel_info *channel;
  57. int chan_num;
  58. /* iterate through input block channels */
  59. for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
  60. channel = fei->channel_data[chan_num];
  61. /* is this descriptor initialised and TP enabled */
  62. if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
  63. tasklet_schedule(&channel->tsklet);
  64. }
  65. fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
  66. add_timer(&fei->timer);
  67. }
/*
 * channel_swdemux_tsklet - software demux for one tsin channel.
 *
 * Runs from the polling timer. Drains all complete packets between
 * the memdma bus read pointer (rp) and write pointer (wp), feeds them
 * to the DVB software demux, then advances the hw read pointer.
 *
 * @data: struct channel_info * cast to unsigned long (tasklet API).
 */
static void channel_swdemux_tsklet(unsigned long data)
{
	struct channel_info *channel = (struct channel_info *)data;
	struct c8sectpfei *fei;
	unsigned long wp, rp;
	int pos, num_packets, n, size;
	u8 *buf;

	if (unlikely(!channel || !channel->irec))
		return;

	fei = channel->fei;

	wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
	rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));

	/* offset of rp into the CPU-visible back buffer */
	pos = rp - channel->back_buffer_busaddr;

	/*
	 * has it wrapped: if so, only consume up to the end of the
	 * buffer this pass; the wrapped-around part is picked up on
	 * the next poll after rp is reset to the buffer base below.
	 */
	if (wp < rp)
		wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;

	size = wp - rp;
	num_packets = size / PACKET_SIZE;

	/* manage cache so data is visible to CPU */
	dma_sync_single_for_cpu(fei->dev,
				rp,
				size,
				DMA_FROM_DEVICE);

	buf = (u8 *) channel->back_buffer_aligned;

	dev_dbg(fei->dev,
		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);

	/* hand each packet (incl. memdma header) to the sw demux */
	for (n = 0; n < num_packets; n++) {
		dvb_dmx_swfilter_packets(
			&fei->c8sectpfe[0]->
				demux[channel->demux_mapping].dvb_demux,
			&buf[pos], 1);
		pos += PACKET_SIZE;
	}

	/* advance the read pointer (wrap back to base if at end) */
	if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
		writel(channel->back_buffer_busaddr, channel->irec +
			DMA_PRDS_BUSRP_TP(0));
	else
		writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
}
  109. static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
  110. {
  111. struct dvb_demux *demux = dvbdmxfeed->demux;
  112. struct stdemux *stdemux = (struct stdemux *)demux->priv;
  113. struct c8sectpfei *fei = stdemux->c8sectpfei;
  114. struct channel_info *channel;
  115. u32 tmp;
  116. unsigned long *bitmap;
  117. int ret;
  118. switch (dvbdmxfeed->type) {
  119. case DMX_TYPE_TS:
  120. break;
  121. case DMX_TYPE_SEC:
  122. break;
  123. default:
  124. dev_err(fei->dev, "%s:%d Error bailing\n"
  125. , __func__, __LINE__);
  126. return -EINVAL;
  127. }
  128. if (dvbdmxfeed->type == DMX_TYPE_TS) {
  129. switch (dvbdmxfeed->pes_type) {
  130. case DMX_PES_VIDEO:
  131. case DMX_PES_AUDIO:
  132. case DMX_PES_TELETEXT:
  133. case DMX_PES_PCR:
  134. case DMX_PES_OTHER:
  135. break;
  136. default:
  137. dev_err(fei->dev, "%s:%d Error bailing\n"
  138. , __func__, __LINE__);
  139. return -EINVAL;
  140. }
  141. }
  142. if (!atomic_read(&fei->fw_loaded)) {
  143. ret = load_c8sectpfe_fw(fei);
  144. if (ret)
  145. return ret;
  146. }
  147. mutex_lock(&fei->lock);
  148. channel = fei->channel_data[stdemux->tsin_index];
  149. bitmap = (unsigned long *) channel->pid_buffer_aligned;
  150. /* 8192 is a special PID */
  151. if (dvbdmxfeed->pid == 8192) {
  152. tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
  153. tmp &= ~C8SECTPFE_PID_ENABLE;
  154. writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
  155. } else {
  156. bitmap_set(bitmap, dvbdmxfeed->pid, 1);
  157. }
  158. /* manage cache so PID bitmap is visible to HW */
  159. dma_sync_single_for_device(fei->dev,
  160. channel->pid_buffer_busaddr,
  161. PID_TABLE_SIZE,
  162. DMA_TO_DEVICE);
  163. channel->active = 1;
  164. if (fei->global_feed_count == 0) {
  165. fei->timer.expires = jiffies +
  166. msecs_to_jiffies(msecs_to_jiffies(POLL_MSECS));
  167. add_timer(&fei->timer);
  168. }
  169. if (stdemux->running_feed_count == 0) {
  170. dev_dbg(fei->dev, "Starting channel=%p\n", channel);
  171. tasklet_init(&channel->tsklet, channel_swdemux_tsklet,
  172. (unsigned long) channel);
  173. /* Reset the internal inputblock sram pointers */
  174. writel(channel->fifo,
  175. fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
  176. writel(channel->fifo + FIFO_LEN - 1,
  177. fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
  178. writel(channel->fifo,
  179. fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
  180. writel(channel->fifo,
  181. fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
  182. /* reset read / write memdma ptrs for this channel */
  183. writel(channel->back_buffer_busaddr, channel->irec +
  184. DMA_PRDS_BUSBASE_TP(0));
  185. tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
  186. writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
  187. writel(channel->back_buffer_busaddr, channel->irec +
  188. DMA_PRDS_BUSWP_TP(0));
  189. /* Issue a reset and enable InputBlock */
  190. writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
  191. , fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
  192. /* and enable the tp */
  193. writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
  194. dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
  195. , __func__, __LINE__, stdemux);
  196. }
  197. stdemux->running_feed_count++;
  198. fei->global_feed_count++;
  199. mutex_unlock(&fei->lock);
  200. return 0;
  201. }
/*
 * c8sectpfe_stop_feed - DVB demux stop_feed callback.
 *
 * Removes the feed's PID from the hw filter bitmap (stopping the
 * pass-all PID 8192 re-enables hw PID filtering instead). When the
 * last feed on this demux stops, the input block and TP are shut down
 * following the re-configuration sequence from the functional spec;
 * when the last feed globally stops, the polling timer is deleted.
 */
static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = (struct stdemux *)demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	int idlereq;
	u32 tmp;
	int ret;
	unsigned long *bitmap;

	/*
	 * NOTE(review): mirrors the lazy fw load in start_feed();
	 * presumably guards against stop being reached before a
	 * successful start — confirm this is intentional.
	 */
	if (!atomic_read(&fei->fw_loaded)) {
		ret = load_c8sectpfe_fw(fei);
		if (ret)
			return ret;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = (unsigned long *) channel->pid_buffer_aligned;

	/* stopping the pass-all PID: turn hw PID filtering back on */
	if (dvbdmxfeed->pid == 8192) {
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp |= C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so data is visible to HW */
	dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

	if (--stdemux->running_feed_count == 0) {

		channel = fei->channel_data[stdemux->tsin_index];

		/* TP re-configuration on page 168 of functional spec */

		/* disable IB (prevents more TS data going to memdma) */
		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* disable this channels descriptor */
		writel(0, channel->irec + DMA_PRDS_TPENABLE);

		tasklet_disable(&channel->tsklet);

		/* now request memdma channel goes idle */
		idlereq = (1 << channel->tsin_id) | IDLEREQ;
		writel(idlereq, fei->io + DMA_IDLE_REQ);

		/* wait for idle irq handler to signal completion */
		ret = wait_for_completion_timeout(&channel->idle_completion,
						msecs_to_jiffies(100));

		if (ret == 0)
			dev_warn(fei->dev,
				"Timeout waiting for idle irq on tsin%d\n",
				channel->tsin_id);

		reinit_completion(&channel->idle_completion);

		/* reset read / write ptrs for this channel */

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSWP_TP(0));

		dev_dbg(fei->dev,
			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
			__func__, __LINE__, stdemux, channel->tsin_id);

		/* turn off all PIDS in the bitmap */
		memset((void *)channel->pid_buffer_aligned
			, 0x00, PID_TABLE_SIZE);

		/* manage cache so data is visible to HW */
		dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

		channel->active = 0;
	}

	if (--fei->global_feed_count == 0) {
		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
			, __func__, __LINE__, fei->global_feed_count);

		del_timer(&fei->timer);
	}

	mutex_unlock(&fei->lock);

	return 0;
}
  279. static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
  280. {
  281. int i;
  282. for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
  283. if (!fei->channel_data[i])
  284. continue;
  285. if (fei->channel_data[i]->tsin_id == tsin_num)
  286. return fei->channel_data[i];
  287. }
  288. return NULL;
  289. }
  290. static void c8sectpfe_getconfig(struct c8sectpfei *fei)
  291. {
  292. struct c8sectpfe_hw *hw = &fei->hw_stats;
  293. hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
  294. hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
  295. hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
  296. hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
  297. hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
  298. hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
  299. hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
  300. dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
  301. dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
  302. dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
  303. dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
  304. , hw->num_swts);
  305. dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
  306. dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
  307. dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
  308. dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
  309. , hw->num_tp);
  310. }
/*
 * c8sectpfe_idle_irq_handler - memdma idle-request irq.
 *
 * Raised once the TP has honoured an idle request written to
 * DMA_IDLE_REQ (see c8sectpfe_stop_feed()). Completes the waiting
 * channel's idle_completion for each set channel bit, then acks the
 * request by clearing the register.
 */
static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
{
	struct c8sectpfei *fei = priv;
	struct channel_info *chan;
	int bit;
	unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);

	/* page 168 of functional spec: Clear the idle request
	by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */

	/* signal idle completion */
	for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {

		chan = find_channel(fei, bit);

		if (chan)
			complete(&chan->idle_completion);
	}

	writel(0, fei->io + DMA_IDLE_REQ);

	return IRQ_HANDLED;
}
  328. static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
  329. {
  330. if (!fei || !tsin)
  331. return;
  332. if (tsin->back_buffer_busaddr)
  333. if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
  334. dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
  335. FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
  336. kfree(tsin->back_buffer_start);
  337. if (tsin->pid_buffer_busaddr)
  338. if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
  339. dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
  340. PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
  341. kfree(tsin->pid_buffer_start);
  342. }
/* max length of a "tsinN-serial"/"tsinN-parallel" pinctrl state name */
#define MAX_NAME 20

/*
 * configure_memdma_and_inputblock - one-time hw setup for a tsin channel.
 *
 * Allocates and DMA-maps the packet back buffer and the PID filter
 * bitmap, applies the channel's pinctrl state, programs the input
 * block (format, sync/drop, packet length, sram FIFO window, PID
 * filter) and this channel's memdma pointer record, then initialises
 * the software demux tasklet.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is released via free_input_block().
 */
static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
				struct channel_info *tsin)
{
	int ret;
	u32 tmp;
	char tsin_pin_name[MAX_NAME];

	if (!fei || !tsin)
		return -EINVAL;

	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
		, __func__, __LINE__, tsin, tsin->tsin_id);

	init_completion(&tsin->idle_completion);

	/* over-allocate so the buffer can be aligned down below */
	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE +
					FEI_ALIGNMENT, GFP_KERNEL);
	if (!tsin->back_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Ensure backbuffer is 32byte aligned */
	tsin->back_buffer_aligned = tsin->back_buffer_start
		+ FEI_ALIGNMENT;

	tsin->back_buffer_aligned = (void *)
		(((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);

	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
					(void *)tsin->back_buffer_aligned,
					FEI_BUFFER_SIZE,
					DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map back_buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/*
	 * The pid buffer can be configured (in hw) for byte or bit
	 * per pid. By powers of deduction we conclude stih407 family
	 * is configured (at SoC design stage) for bit per pid.
	 */
	tsin->pid_buffer_start = kzalloc(2048, GFP_KERNEL);
	if (!tsin->pid_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * PID buffer needs to be aligned to size of the pid table
	 * which at bit per pid is 1024 bytes (8192 pids / 8).
	 * PIDF_BASE register enforces this alignment when writing
	 * the register.
	 */

	tsin->pid_buffer_aligned = tsin->pid_buffer_start +
		PID_TABLE_SIZE;

	tsin->pid_buffer_aligned = (void *)
		(((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);

	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
						tsin->pid_buffer_aligned,
						PID_TABLE_SIZE,
						DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map pid_bitmap\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/* manage cache so pid bitmap is visible to HW */
	dma_sync_single_for_device(fei->dev,
				tsin->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	/* select the serial or parallel pin state for this tsin */
	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
		(tsin->serial_not_parallel ? "serial" : "parallel"));

	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
	if (IS_ERR(tsin->pstate)) {
		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
			, __func__, tsin_pin_name);
		ret = PTR_ERR(tsin->pstate);
		goto err_unmap;
	}

	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);

	if (ret) {
		dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
			, __func__);
		goto err_unmap;
	}

	/* Enable this input block */
	tmp = readl(fei->io + SYS_INPUT_CLKEN);
	tmp |= BIT(tsin->tsin_id);
	writel(tmp, fei->io + SYS_INPUT_CLKEN);

	if (tsin->serial_not_parallel)
		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;

	if (tsin->invert_ts_clk)
		tmp |= C8SECTPFE_INVERT_TSCLK;

	if (tsin->async_not_sync)
		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;

	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;

	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));

	/* sync/drop config; 0x47 is the standard MPEG-TS sync byte */
	writel(C8SECTPFE_SYNC(0x9) |
		C8SECTPFE_DROP(0x9) |
		C8SECTPFE_TOKEN(0x47),
		fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));

	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));

	/* Place the FIFO's at the end of the irec descriptors */

	tsin->fifo = (tsin->tsin_id * FIFO_LEN);

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
	writel(tsin->fifo + FIFO_LEN - 1,
		fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));

	writel(tsin->pid_buffer_busaddr,
		fei->io + PIDF_BASE(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
		&tsin->pid_buffer_busaddr);

	/* Configure and enable HW PID filtering */

	/*
	 * The PID value is created by assembling the first 8 bytes of
	 * the TS packet into a 64-bit word in big-endian format. A
	 * slice of that 64-bit word is taken from
	 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
	 */
	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
		| C8SECTPFE_PID_OFFSET(40));

	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
		tsin->tsin_id,
		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));

	/* Get base address of pointer record block from DMEM */
	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
			readl(fei->io + DMA_PTRREC_BASE);

	/* fill out pointer record data structure */

	/* advance pointer record block to our channel */
	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);

	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);
	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);

	/* packet size rounded up to the next 8-byte boundary (188 -> 192) */
	writel((188 + 7)&~7, tsin->irec + DMA_PRDS_PKTSIZE);
	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);

	/* read/write pointers with physical bus address */

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));

	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));

	/* initialize tasklet */
	tasklet_init(&tsin->tsklet, channel_swdemux_tsklet,
		(unsigned long) tsin);

	return 0;

err_unmap:
	free_input_block(fei, tsin);
	return ret;
}
/*
 * c8sectpfe_error_irq_handler - memdma error irq.
 *
 * Currently only logs that an error interrupt fired; no recovery is
 * implemented yet.
 */
static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
{
	struct c8sectpfei *fei = priv;

	dev_err(fei->dev, "%s: error handling not yet implemented\n"
		, __func__);

	/*
	 * TODO FIXME we should detect some error conditions here
	 * and ideally do something about them!
	 */

	return IRQ_HANDLED;
}
  504. static int c8sectpfe_probe(struct platform_device *pdev)
  505. {
  506. struct device *dev = &pdev->dev;
  507. struct device_node *child, *np = dev->of_node;
  508. struct c8sectpfei *fei;
  509. struct resource *res;
  510. int ret, index = 0;
  511. struct channel_info *tsin;
  512. /* Allocate the c8sectpfei structure */
  513. fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
  514. if (!fei)
  515. return -ENOMEM;
  516. fei->dev = dev;
  517. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
  518. fei->io = devm_ioremap_resource(dev, res);
  519. if (IS_ERR(fei->io))
  520. return PTR_ERR(fei->io);
  521. res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  522. "c8sectpfe-ram");
  523. fei->sram = devm_ioremap_resource(dev, res);
  524. if (IS_ERR(fei->sram))
  525. return PTR_ERR(fei->sram);
  526. fei->sram_size = resource_size(res);
  527. fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
  528. if (fei->idle_irq < 0)
  529. return fei->idle_irq;
  530. fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
  531. if (fei->error_irq < 0)
  532. return fei->error_irq;
  533. platform_set_drvdata(pdev, fei);
  534. fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
  535. if (IS_ERR(fei->c8sectpfeclk)) {
  536. dev_err(dev, "c8sectpfe clk not found\n");
  537. return PTR_ERR(fei->c8sectpfeclk);
  538. }
  539. ret = clk_prepare_enable(fei->c8sectpfeclk);
  540. if (ret) {
  541. dev_err(dev, "Failed to enable c8sectpfe clock\n");
  542. return ret;
  543. }
  544. /* to save power disable all IP's (on by default) */
  545. writel(0, fei->io + SYS_INPUT_CLKEN);
  546. /* Enable memdma clock */
  547. writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);
  548. /* clear internal sram */
  549. memset_io(fei->sram, 0x0, fei->sram_size);
  550. c8sectpfe_getconfig(fei);
  551. ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
  552. 0, "c8sectpfe-idle-irq", fei);
  553. if (ret) {
  554. dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
  555. goto err_clk_disable;
  556. }
  557. ret = devm_request_irq(dev, fei->error_irq,
  558. c8sectpfe_error_irq_handler, 0,
  559. "c8sectpfe-error-irq", fei);
  560. if (ret) {
  561. dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
  562. goto err_clk_disable;
  563. }
  564. fei->tsin_count = of_get_child_count(np);
  565. if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
  566. fei->tsin_count > fei->hw_stats.num_ib) {
  567. dev_err(dev, "More tsin declared than exist on SoC!\n");
  568. ret = -EINVAL;
  569. goto err_clk_disable;
  570. }
  571. fei->pinctrl = devm_pinctrl_get(dev);
  572. if (IS_ERR(fei->pinctrl)) {
  573. dev_err(dev, "Error getting tsin pins\n");
  574. ret = PTR_ERR(fei->pinctrl);
  575. goto err_clk_disable;
  576. }
  577. for_each_child_of_node(np, child) {
  578. struct device_node *i2c_bus;
  579. fei->channel_data[index] = devm_kzalloc(dev,
  580. sizeof(struct channel_info),
  581. GFP_KERNEL);
  582. if (!fei->channel_data[index]) {
  583. ret = -ENOMEM;
  584. goto err_clk_disable;
  585. }
  586. tsin = fei->channel_data[index];
  587. tsin->fei = fei;
  588. ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
  589. if (ret) {
  590. dev_err(&pdev->dev, "No tsin_num found\n");
  591. goto err_clk_disable;
  592. }
  593. /* sanity check value */
  594. if (tsin->tsin_id > fei->hw_stats.num_ib) {
  595. dev_err(&pdev->dev,
  596. "tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
  597. tsin->tsin_id, fei->hw_stats.num_ib);
  598. ret = -EINVAL;
  599. goto err_clk_disable;
  600. }
  601. tsin->invert_ts_clk = of_property_read_bool(child,
  602. "invert-ts-clk");
  603. tsin->serial_not_parallel = of_property_read_bool(child,
  604. "serial-not-parallel");
  605. tsin->async_not_sync = of_property_read_bool(child,
  606. "async-not-sync");
  607. ret = of_property_read_u32(child, "dvb-card",
  608. &tsin->dvb_card);
  609. if (ret) {
  610. dev_err(&pdev->dev, "No dvb-card found\n");
  611. goto err_clk_disable;
  612. }
  613. i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
  614. if (!i2c_bus) {
  615. dev_err(&pdev->dev, "No i2c-bus found\n");
  616. ret = -ENODEV;
  617. goto err_clk_disable;
  618. }
  619. tsin->i2c_adapter =
  620. of_find_i2c_adapter_by_node(i2c_bus);
  621. if (!tsin->i2c_adapter) {
  622. dev_err(&pdev->dev, "No i2c adapter found\n");
  623. of_node_put(i2c_bus);
  624. ret = -ENODEV;
  625. goto err_clk_disable;
  626. }
  627. of_node_put(i2c_bus);
  628. tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);
  629. ret = gpio_is_valid(tsin->rst_gpio);
  630. if (!ret) {
  631. dev_err(dev,
  632. "reset gpio for tsin%d not valid (gpio=%d)\n",
  633. tsin->tsin_id, tsin->rst_gpio);
  634. goto err_clk_disable;
  635. }
  636. ret = devm_gpio_request_one(dev, tsin->rst_gpio,
  637. GPIOF_OUT_INIT_LOW, "NIM reset");
  638. if (ret && ret != -EBUSY) {
  639. dev_err(dev, "Can't request tsin%d reset gpio\n"
  640. , fei->channel_data[index]->tsin_id);
  641. goto err_clk_disable;
  642. }
  643. if (!ret) {
  644. /* toggle reset lines */
  645. gpio_direction_output(tsin->rst_gpio, 0);
  646. usleep_range(3500, 5000);
  647. gpio_direction_output(tsin->rst_gpio, 1);
  648. usleep_range(3000, 5000);
  649. }
  650. tsin->demux_mapping = index;
  651. dev_dbg(fei->dev,
  652. "channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
  653. fei->channel_data[index], index,
  654. tsin->tsin_id, tsin->invert_ts_clk,
  655. tsin->serial_not_parallel, tsin->async_not_sync,
  656. tsin->dvb_card);
  657. index++;
  658. }
  659. /* Setup timer interrupt */
  660. timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);
  661. mutex_init(&fei->lock);
  662. /* Get the configuration information about the tuners */
  663. ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
  664. (void *)fei,
  665. c8sectpfe_start_feed,
  666. c8sectpfe_stop_feed);
  667. if (ret) {
  668. dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
  669. ret);
  670. goto err_clk_disable;
  671. }
  672. c8sectpfe_debugfs_init(fei);
  673. return 0;
  674. err_clk_disable:
  675. clk_disable_unprepare(fei->c8sectpfeclk);
  676. return ret;
  677. }
/*
 * c8sectpfe_remove - platform driver remove.
 *
 * Unregisters the frontends, frees each channel's DMA resources,
 * stops the memdma SLIM core and gates all internal IP clocks.
 */
static int c8sectpfe_remove(struct platform_device *pdev)
{
	struct c8sectpfei *fei = platform_get_drvdata(pdev);
	struct channel_info *channel;
	int i;

	/*
	 * Wait for fw_ack before tearing down — presumably signalled
	 * by the firmware load path; confirm against load_c8sectpfe_fw().
	 */
	wait_for_completion(&fei->fw_ack);

	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);

	/*
	 * Now loop through and un-configure each of the InputBlock resources
	 */
	for (i = 0; i < fei->tsin_count; i++) {
		channel = fei->channel_data[i];
		free_input_block(fei, channel);
	}

	c8sectpfe_debugfs_exit(fei);

	dev_info(fei->dev, "Stopping memdma SLIM core\n");
	if (readl(fei->io + DMA_CPU_RUN))
		writel(0x0, fei->io + DMA_CPU_RUN);

	/* unclock all internal IP's */
	if (readl(fei->io + SYS_INPUT_CLKEN))
		writel(0, fei->io + SYS_INPUT_CLKEN);

	if (readl(fei->io + SYS_OTHER_CLKEN))
		writel(0, fei->io + SYS_OTHER_CLKEN);

	if (fei->c8sectpfeclk)
		clk_disable_unprepare(fei->c8sectpfeclk);

	return 0;
}
  705. static int configure_channels(struct c8sectpfei *fei)
  706. {
  707. int index = 0, ret;
  708. struct channel_info *tsin;
  709. struct device_node *child, *np = fei->dev->of_node;
  710. /* iterate round each tsin and configure memdma descriptor and IB hw */
  711. for_each_child_of_node(np, child) {
  712. tsin = fei->channel_data[index];
  713. ret = configure_memdma_and_inputblock(fei,
  714. fei->channel_data[index]);
  715. if (ret) {
  716. dev_err(fei->dev,
  717. "configure_memdma_and_inputblock failed\n");
  718. goto err_unmap;
  719. }
  720. index++;
  721. }
  722. return 0;
  723. err_unmap:
  724. for (index = 0; index < fei->tsin_count; index++) {
  725. tsin = fei->channel_data[index];
  726. free_input_block(fei, tsin);
  727. }
  728. return ret;
  729. }
  730. static int
  731. c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
  732. {
  733. struct elf32_hdr *ehdr;
  734. char class;
  735. if (!fw) {
  736. dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
  737. return -EINVAL;
  738. }
  739. if (fw->size < sizeof(struct elf32_hdr)) {
  740. dev_err(fei->dev, "Image is too small\n");
  741. return -EINVAL;
  742. }
  743. ehdr = (struct elf32_hdr *)fw->data;
  744. /* We only support ELF32 at this point */
  745. class = ehdr->e_ident[EI_CLASS];
  746. if (class != ELFCLASS32) {
  747. dev_err(fei->dev, "Unsupported class: %d\n", class);
  748. return -EINVAL;
  749. }
  750. if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
  751. dev_err(fei->dev, "Unsupported firmware endianness\n");
  752. return -EINVAL;
  753. }
  754. if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
  755. dev_err(fei->dev, "Image is too small\n");
  756. return -EINVAL;
  757. }
  758. if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
  759. dev_err(fei->dev, "Image is corrupted (bad magic)\n");
  760. return -EINVAL;
  761. }
  762. /* Check ELF magic */
  763. ehdr = (Elf32_Ehdr *)fw->data;
  764. if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
  765. ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
  766. ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
  767. ehdr->e_ident[EI_MAG3] != ELFMAG3) {
  768. dev_err(fei->dev, "Invalid ELF magic\n");
  769. return -EINVAL;
  770. }
  771. if (ehdr->e_type != ET_EXEC) {
  772. dev_err(fei->dev, "Unsupported ELF header type\n");
  773. return -EINVAL;
  774. }
  775. if (ehdr->e_phoff > fw->size) {
  776. dev_err(fei->dev, "Firmware size is too small\n");
  777. return -EINVAL;
  778. }
  779. return 0;
  780. }
/*
 * load_imem_segment() - copy one executable ELF segment into SLIM core IMEM.
 *
 * IMEM instructions are 24 bits wide in the ELF image but must occupy a
 * 32-bit word each in IMEM, so after every third source byte a 0x00 pad
 * byte is written to the destination (hence the destination occupies
 * p_memsz + p_memsz/3 bytes).
 *
 * NOTE(review): the pad byte written is 0x00; the comment below calls it a
 * NOP — presumably 0x00 encodes NOP on the SLIM core, but confirm.
 */
static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dest,
			int seg_num)
{
	const u8 *imem_src = fw->data + phdr->p_offset;
	int i;

	/*
	 * For IMEM segments, the segment contains 24-bit
	 * instructions which must be padded to 32-bit
	 * instructions before being written. The written
	 * segment is padded with NOP instructions.
	 */

	dev_dbg(fei->dev,
		"Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num,
		phdr->p_paddr, phdr->p_filesz,
		dest, phdr->p_memsz + phdr->p_memsz / 3);

	for (i = 0; i < phdr->p_filesz; i++) {

		/* byte-wide copy into device memory */
		writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);

		/* Every 3 bytes, add an additional
		 * padding zero in destination */
		if (i % 3 == 2) {
			dest++;
			writeb(0x00, (void __iomem *)dest);
		}

		dest++;
		imem_src++;
	}
}
  810. static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
  811. const struct firmware *fw, u8 __iomem *dst, int seg_num)
  812. {
  813. /*
  814. * For DMEM segments copy the segment data from the ELF
  815. * file and pad segment with zeroes
  816. */
  817. dev_dbg(fei->dev,
  818. "Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
  819. seg_num, phdr->p_paddr, phdr->p_filesz,
  820. dst, phdr->p_memsz);
  821. memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
  822. phdr->p_filesz);
  823. memset((void __force *)dst + phdr->p_filesz, 0,
  824. phdr->p_memsz - phdr->p_filesz);
  825. }
/*
 * load_slim_core_fw() - parse the MEMDMA ELF firmware and copy each
 * PT_LOAD segment into the SLIM core.
 *
 * Executable segments (PF_X) go to IMEM via load_imem_segment(); all
 * other load segments go to DMEM via load_dmem_segment(). Each segment
 * is bounds-checked against the firmware image size first.
 *
 * Ownership: this function calls release_firmware(@fw) before returning,
 * on both success and segment-error paths (but not on the initial
 * NULL-argument check, where with a NULL @fw there is nothing to release).
 *
 * Returns 0 on success, -EINVAL on bad arguments or an out-of-bounds
 * segment.
 */
static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
{
	Elf32_Ehdr *ehdr;
	Elf32_Phdr *phdr;
	u8 __iomem *dst;
	int err = 0, i;

	if (!fw || !fei)
		return -EINVAL;

	ehdr = (Elf32_Ehdr *)fw->data;
	phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {

		/* Only consider LOAD segments */
		if (phdr->p_type != PT_LOAD)
			continue;

		/*
		 * Check segment is contained within the fw->data buffer
		 */
		if (phdr->p_offset + phdr->p_filesz > fw->size) {
			dev_err(fei->dev,
				"Segment %d is outside of firmware file\n", i);
			err = -EINVAL;
			break;
		}

		/*
		 * MEMDMA IMEM has executable flag set, otherwise load
		 * this segment into DMEM.
		 *
		 */
		if (phdr->p_flags & PF_X) {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_imem_segment(fei, phdr, fw, dst, i);
		} else {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_dmem_segment(fei, phdr, fw, dst, i);
		}
	}

	release_firmware(fw);
	return err;
}
  876. static int load_c8sectpfe_fw(struct c8sectpfei *fei)
  877. {
  878. const struct firmware *fw;
  879. int err;
  880. dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
  881. err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
  882. if (err)
  883. return err;
  884. err = c8sectpfe_elf_sanity_check(fei, fw);
  885. if (err) {
  886. dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
  887. , err);
  888. release_firmware(fw);
  889. return err;
  890. }
  891. err = load_slim_core_fw(fw, fei);
  892. if (err) {
  893. dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
  894. return err;
  895. }
  896. /* now the firmware is loaded configure the input blocks */
  897. err = configure_channels(fei);
  898. if (err) {
  899. dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
  900. return err;
  901. }
  902. /*
  903. * STBus target port can access IMEM and DMEM ports
  904. * without waiting for CPU
  905. */
  906. writel(0x1, fei->io + DMA_PER_STBUS_SYNC);
  907. dev_info(fei->dev, "Boot the memdma SLIM core\n");
  908. writel(0x1, fei->io + DMA_CPU_RUN);
  909. atomic_set(&fei->fw_loaded, 1);
  910. return 0;
  911. }
/* Devicetree match table: binds against the STiH407 C8SECTPFE IP */
static const struct of_device_id c8sectpfe_match[] = {
	{ .compatible = "st,stih407-c8sectpfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c8sectpfe_match);

/* Platform driver glue: probe/remove are defined earlier in this file */
static struct platform_driver c8sectpfe_driver = {
	.driver = {
		.name = "c8sectpfe",
		.of_match_table = of_match_ptr(c8sectpfe_match),
	},
	.probe = c8sectpfe_probe,
	.remove = c8sectpfe_remove,
};

module_platform_driver(c8sectpfe_driver);

MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
MODULE_LICENSE("GPL");