fmdrv_common.c

/*
 *  FM Driver for Connectivity chip of Texas Instruments.
 *
 *  This sub-module of FM driver is common for FM RX and TX
 *  functionality. This module is responsible for:
 *  1) Forming group of Channel-8 commands to perform particular
 *     functionality (e.g., frequency set requires more than
 *     one Channel-8 command to be sent to the chip).
 *  2) Sending each Channel-8 command to the chip and reading
 *     response back over Shared Transport.
 *  3) Managing TX and RX Queues and Tasklets.
 *  4) Handling FM Interrupt packet and taking appropriate action.
 *  5) Loading FM firmware to the chip (common, FM TX, and FM RX
 *     firmware files based on mode selection)
 *
 *  Copyright (C) 2011 Texas Instruments
 *  Author: Raja Mani <raja_mani@ti.com>
 *  Author: Manjunatha Halli <manjunatha_halli@ti.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include "fmdrv.h"
#include "fmdrv_v4l2.h"
#include "fmdrv_common.h"
#include <linux/ti_wilink_st.h>
#include "fmdrv_rx.h"
#include "fmdrv_tx.h"

/* Region info */
static struct region_info region_configs[] = {
	/* Europe/US */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 87500,	/* 87.5 MHz */
		.top_freq = 108000,	/* 108 MHz */
		.fm_band = 0,
	},
	/* Japan */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 76000,	/* 76 MHz */
		.top_freq = 90000,	/* 90 MHz */
		.fm_band = 1,
	},
};

/* Band selection */
static u8 default_radio_region;	/* Europe/US */
module_param(default_radio_region, byte, 0);
MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");

/* RDS buffer blocks */
static u32 default_rds_buf = 300;
module_param(default_rds_buf, uint, 0444);
MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");

/* Radio Nr */
static u32 radio_nr = -1;
module_param(radio_nr, int, 0444);
MODULE_PARM_DESC(radio_nr, "Radio Nr");

/* FM irq handlers forward declaration */
static void fm_irq_send_flag_getcmd(struct fmdev *);
static void fm_irq_handle_flag_getcmd_resp(struct fmdev *);
static void fm_irq_handle_hw_malfunction(struct fmdev *);
static void fm_irq_handle_rds_start(struct fmdev *);
static void fm_irq_send_rdsdata_getcmd(struct fmdev *);
static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *);
static void fm_irq_handle_rds_finish(struct fmdev *);
static void fm_irq_handle_tune_op_ended(struct fmdev *);
static void fm_irq_handle_power_enb(struct fmdev *);
static void fm_irq_handle_low_rssi_start(struct fmdev *);
static void fm_irq_afjump_set_pi(struct fmdev *);
static void fm_irq_handle_set_pi_resp(struct fmdev *);
static void fm_irq_afjump_set_pimask(struct fmdev *);
static void fm_irq_handle_set_pimask_resp(struct fmdev *);
static void fm_irq_afjump_setfreq(struct fmdev *);
static void fm_irq_handle_setfreq_resp(struct fmdev *);
static void fm_irq_afjump_enableint(struct fmdev *);
static void fm_irq_afjump_enableint_resp(struct fmdev *);
static void fm_irq_start_afjump(struct fmdev *);
static void fm_irq_handle_start_afjump_resp(struct fmdev *);
static void fm_irq_afjump_rd_freq(struct fmdev *);
static void fm_irq_afjump_rd_freq_resp(struct fmdev *);
static void fm_irq_handle_low_rssi_finish(struct fmdev *);
static void fm_irq_send_intmsk_cmd(struct fmdev *);
static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *);

/*
 * When the FM common module receives an interrupt packet, the following
 * handlers will be executed one after another to service the interrupt(s).
 */
enum fmc_irq_handler_index {
	FM_SEND_FLAG_GETCMD_IDX,
	FM_HANDLE_FLAG_GETCMD_RESP_IDX,
	/* HW malfunction irq handler */
	FM_HW_MAL_FUNC_IDX,
	/* RDS threshold reached irq handler */
	FM_RDS_START_IDX,
	FM_RDS_SEND_RDS_GETCMD_IDX,
	FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX,
	FM_RDS_FINISH_IDX,
	/* Tune operation ended irq handler */
	FM_HW_TUNE_OP_ENDED_IDX,
	/* TX power enable irq handler */
	FM_HW_POWER_ENB_IDX,
	/* Low RSSI irq handler */
	FM_LOW_RSSI_START_IDX,
	FM_AF_JUMP_SETPI_IDX,
	FM_AF_JUMP_HANDLE_SETPI_RESP_IDX,
	FM_AF_JUMP_SETPI_MASK_IDX,
	FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX,
	FM_AF_JUMP_SET_AF_FREQ_IDX,
	FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX,
	FM_AF_JUMP_ENABLE_INT_IDX,
	FM_AF_JUMP_ENABLE_INT_RESP_IDX,
	FM_AF_JUMP_START_AFJUMP_IDX,
	FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX,
	FM_AF_JUMP_RD_FREQ_IDX,
	FM_AF_JUMP_RD_FREQ_RESP_IDX,
	FM_LOW_RSSI_FINISH_IDX,
	/* Interrupt process post action */
	FM_SEND_INTMSK_CMD_IDX,
	FM_HANDLE_INTMSK_CMD_RESP_IDX,
};

/* FM interrupt handler table */
static int_handler_prototype int_handler_table[] = {
	fm_irq_send_flag_getcmd,
	fm_irq_handle_flag_getcmd_resp,
	fm_irq_handle_hw_malfunction,
	fm_irq_handle_rds_start,	/* RDS threshold reached irq handler */
	fm_irq_send_rdsdata_getcmd,
	fm_irq_handle_rdsdata_getcmd_resp,
	fm_irq_handle_rds_finish,
	fm_irq_handle_tune_op_ended,
	fm_irq_handle_power_enb,	/* TX power enable irq handler */
	fm_irq_handle_low_rssi_start,
	fm_irq_afjump_set_pi,
	fm_irq_handle_set_pi_resp,
	fm_irq_afjump_set_pimask,
	fm_irq_handle_set_pimask_resp,
	fm_irq_afjump_setfreq,
	fm_irq_handle_setfreq_resp,
	fm_irq_afjump_enableint,
	fm_irq_afjump_enableint_resp,
	fm_irq_start_afjump,
	fm_irq_handle_start_afjump_resp,
	fm_irq_afjump_rd_freq,
	fm_irq_afjump_rd_freq_resp,
	fm_irq_handle_low_rssi_finish,
	fm_irq_send_intmsk_cmd,		/* Interrupt process post action */
	fm_irq_handle_intmsk_cmd_resp
};
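
/*
 * Note on the handler chain above: fmdev->irq_info.stage indexes this table.
 * A "send"-type handler queues one Channel-8 command via fm_send_cmd() and
 * arms the irq timeout timer; when recv_tasklet() receives the matching
 * response it re-enters the chain at the stored stage, so the paired "resp"
 * handler runs next. The chain always ends by re-enabling FM interrupts
 * (FM_SEND_INTMSK_CMD_IDX onwards).
 */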

static long (*g_st_write) (struct sk_buff *skb);
static struct completion wait_for_fmdrv_reg_comp;

static inline void fm_irq_call(struct fmdev *fmdev)
{
	fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
}

/* Continue next function in interrupt handler table */
static inline void fm_irq_call_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	fm_irq_call(fmdev);
}

static inline void fm_irq_timeout_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
}
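
/*
 * If the timer armed by fm_irq_timeout_stage() fires, int_timeout_handler()
 * (below) re-enters the chain at FM_SEND_INTMSK_CMD_IDX to re-enable the
 * default interrupt mask, retrying up to FM_IRQ_TIMEOUT_RETRY_MAX times.
 */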

#ifdef FM_DUMP_TXRX_PKT
/* To dump outgoing FM Channel-8 packets */
inline void dump_tx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_cmd_msg_hdr *cmd_hdr;

	cmd_hdr = (struct fm_cmd_msg_hdr *)skb->data;
	printk(KERN_INFO "<<%shdr:%02x len:%02x opcode:%02x type:%s dlen:%02x",
	       fm_cb(skb)->completion ? " " : "*", cmd_hdr->hdr,
	       cmd_hdr->len, cmd_hdr->op,
	       cmd_hdr->rd_wr ? "RD" : "WR", cmd_hdr->dlen);

	len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk(KERN_CONT "\n data(%d): ", cmd_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk(KERN_CONT "%x ",
			       skb->data[FM_CMD_MSG_HDR_SIZE + index]);
		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
	}
	printk(KERN_CONT "\n");
}

/* To dump incoming FM Channel-8 packets */
inline void dump_rx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_event_msg_hdr *evt_hdr;

	evt_hdr = (struct fm_event_msg_hdr *)skb->data;
	printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x opcode:%02x type:%s dlen:%02x",
	       evt_hdr->hdr, evt_hdr->len,
	       evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
	       (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);

	len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk(KERN_CONT "\n data(%d): ", evt_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk(KERN_CONT "%x ",
			       skb->data[FM_EVT_MSG_HDR_SIZE + index]);
		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
	}
	printk(KERN_CONT "\n");
}
#endif

void fmc_update_region_info(struct fmdev *fmdev, u8 region_to_set)
{
	fmdev->rx.region = region_configs[region_to_set];
}

/*
 * The FM common sub-module will schedule this tasklet whenever it receives
 * an FM packet from the ST driver.
 */
static void recv_tasklet(unsigned long arg)
{
	struct fmdev *fmdev;
	struct fm_irq *irq_info;
	struct fm_event_msg_hdr *evt_hdr;
	struct sk_buff *skb;
	u8 num_fm_hci_cmds;
	unsigned long flags;

	fmdev = (struct fmdev *)arg;
	irq_info = &fmdev->irq_info;

	/* Process all packets in the RX queue */
	while ((skb = skb_dequeue(&fmdev->rx_q))) {
		if (skb->len < sizeof(struct fm_event_msg_hdr)) {
			fmerr("skb(%p) has only %d bytes, at least need %zu bytes to decode\n",
			      skb, skb->len, sizeof(struct fm_event_msg_hdr));
			kfree_skb(skb);
			continue;
		}

		evt_hdr = (void *)skb->data;
		num_fm_hci_cmds = evt_hdr->num_fm_hci_cmds;

		/* FM interrupt packet? */
		if (evt_hdr->op == FM_INTERRUPT) {
			/* FM interrupt handler started already? */
			if (!test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
				set_bit(FM_INTTASK_RUNNING, &fmdev->flag);
				if (irq_info->stage != 0) {
					fmerr("Inval stage resetting to zero\n");
					irq_info->stage = 0;
				}

				/*
				 * Execute first function in interrupt handler
				 * table.
				 */
				irq_info->handlers[irq_info->stage](fmdev);
			} else {
				set_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag);
			}
			kfree_skb(skb);
		}
		/* Anyone waiting for this with completion handler? */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp != NULL) {
			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
			complete(fmdev->resp_comp);

			fmdev->resp_comp = NULL;
			atomic_set(&fmdev->tx_cnt, 1);
		}
		/* Is this for interrupt handler? */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp == NULL) {
			if (fmdev->resp_skb != NULL)
				fmerr("Response SKB ptr not NULL\n");

			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

			/* Execute interrupt handler where state index points */
			irq_info->handlers[irq_info->stage](fmdev);

			kfree_skb(skb);
			atomic_set(&fmdev->tx_cnt, 1);
		} else {
			fmerr("Nobody claimed SKB(%p), purging\n", skb);
		}

		/*
		 * Check flow control field. If Num_FM_HCI_Commands field is
		 * not zero, schedule FM TX tasklet.
		 */
		if (num_fm_hci_cmds && atomic_read(&fmdev->tx_cnt))
			if (!skb_queue_empty(&fmdev->tx_q))
				tasklet_schedule(&fmdev->tx_task);
	}
}
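
/*
 * Dispatch summary for recv_tasklet(): FM_INTERRUPT packets (re)start the
 * interrupt handler chain; other packets are matched against the opcode of
 * the last transmitted command (fmdev->pre_op) and either complete a waiting
 * fmc_send_cmd() caller or feed the current irq handler stage. In both
 * response cases the single TX credit (tx_cnt) is returned.
 */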

/* FM send tasklet: scheduled when an FM packet has to be sent to the chip */
static void send_tasklet(unsigned long arg)
{
	struct fmdev *fmdev;
	struct sk_buff *skb;
	int len;

	fmdev = (struct fmdev *)arg;

	if (!atomic_read(&fmdev->tx_cnt))
		return;

	/* Check whether a timeout happened to the last transmitted packet */
	if ((jiffies - fmdev->last_tx_jiffies) > FM_DRV_TX_TIMEOUT) {
		fmerr("TX timeout occurred\n");
		atomic_set(&fmdev->tx_cnt, 1);
	}

	/* Send queued FM TX packets */
	skb = skb_dequeue(&fmdev->tx_q);
	if (!skb)
		return;

	atomic_dec(&fmdev->tx_cnt);
	fmdev->pre_op = fm_cb(skb)->fm_op;

	if (fmdev->resp_comp != NULL)
		fmerr("Response completion handler is not NULL\n");

	fmdev->resp_comp = fm_cb(skb)->completion;

	/* Write FM packet to ST driver */
	len = g_st_write(skb);
	if (len < 0) {
		kfree_skb(skb);
		fmdev->resp_comp = NULL;
		fmerr("TX tasklet failed to send skb(%p)\n", skb);
		atomic_set(&fmdev->tx_cnt, 1);
	} else {
		fmdev->last_tx_jiffies = jiffies;
	}
}
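
/*
 * tx_cnt acts as a one-deep credit: it is consumed here before each write to
 * the ST driver and only returned by recv_tasklet() once the chip reports a
 * non-zero Num_FM_HCI_Commands (or by the timeout above), so at most one
 * Channel-8 command is in flight at a time.
 */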

/*
 * Queues an FM Channel-8 packet to the FM TX queue and schedules the FM TX
 * tasklet for transmission.
 */
static int fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		       int payload_len, struct completion *wait_completion)
{
	struct sk_buff *skb;
	struct fm_cmd_msg_hdr *hdr;
	int size;

	if (fm_op >= FM_INTERRUPT) {
		fmerr("Invalid fm opcode - %d\n", fm_op);
		return -EINVAL;
	}
	if (test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) && payload == NULL) {
		fmerr("Payload data is NULL during fw download\n");
		return -EINVAL;
	}
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag))
		size =
		    FM_CMD_MSG_HDR_SIZE + ((payload == NULL) ? 0 : payload_len);
	else
		size = payload_len;

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		fmerr("No memory to create new SKB\n");
		return -ENOMEM;
	}
	/*
	 * Don't fill FM header info for the commands which come from
	 * FM firmware file.
	 */
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) ||
	    test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
		/* Fill command header info */
		hdr = skb_put(skb, FM_CMD_MSG_HDR_SIZE);
		hdr->hdr = FM_PKT_LOGICAL_CHAN_NUMBER;	/* 0x08 */

		/* 3 (fm_opcode, rd_wr, dlen) + payload len */
		hdr->len = ((payload == NULL) ? 0 : payload_len) + 3;

		/* FM opcode */
		hdr->op = fm_op;

		/* read/write type */
		hdr->rd_wr = type;
		hdr->dlen = payload_len;
		fm_cb(skb)->fm_op = fm_op;

		/*
		 * If firmware download has finished and the command is
		 * not a read command then payload is != NULL - a write
		 * command with u16 payload - convert to be16
		 */
		if (payload != NULL)
			*(__be16 *)payload = cpu_to_be16(*(u16 *)payload);

	} else if (payload != NULL) {
		fm_cb(skb)->fm_op = *((u8 *)payload + 2);
	}
	if (payload != NULL)
		skb_put_data(skb, payload, payload_len);

	fm_cb(skb)->completion = wait_completion;
	skb_queue_tail(&fmdev->tx_q, skb);
	tasklet_schedule(&fmdev->tx_task);

	return 0;
}
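
/*
 * For reference, the Channel-8 command packet built above has this on-wire
 * shape (field widths follow struct fm_cmd_msg_hdr; the u16 payload of a
 * register write is converted to big endian):
 *
 *	+------+-----+--------+-------+------+---------------+
 *	| 0x08 | len | opcode | rd_wr | dlen | payload[dlen] |
 *	+------+-----+--------+-------+------+---------------+
 *	         len = 3 + dlen
 */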

/* Sends an FM Channel-8 command to the chip and waits for the response */
int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		 unsigned int payload_len, void *response, int *response_len)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *evt_hdr;
	unsigned long flags;
	int ret;

	init_completion(&fmdev->maintask_comp);
	ret = fm_send_cmd(fmdev, fm_op, type, payload, payload_len,
			  &fmdev->maintask_comp);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&fmdev->maintask_comp,
					 FM_DRV_TX_TIMEOUT)) {
		fmerr("Timeout(%d sec), didn't get completion signal from RX tasklet\n",
		      jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
		return -ETIMEDOUT;
	}
	if (!fmdev->resp_skb) {
		fmerr("Response SKB is missing\n");
		return -EFAULT;
	}
	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	evt_hdr = (void *)skb->data;
	if (evt_hdr->status != 0) {
		fmerr("Received event pkt status(%d) is not zero\n",
		      evt_hdr->status);
		kfree_skb(skb);
		return -EIO;
	}

	/* Send response data to caller */
	if (response != NULL && response_len != NULL && evt_hdr->dlen &&
	    evt_hdr->dlen <= payload_len) {
		/* Skip header info and copy only response data */
		skb_pull(skb, sizeof(struct fm_event_msg_hdr));
		memcpy(response, skb->data, evt_hdr->dlen);
		*response_len = evt_hdr->dlen;
	} else if (response_len != NULL && evt_hdr->dlen == 0) {
		*response_len = 0;
	}
	kfree_skb(skb);

	return 0;
}
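
/*
 * Minimal caller sketch (mirrors fm_power_down()/fm_power_up() below): a
 * register write passes a u16 payload by pointer, a register read passes a
 * NULL payload and receives data back via response/response_len.
 *
 *	u16 payload = FM_ENABLE;
 *	ret = fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
 *			   sizeof(payload), NULL, NULL);
 */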

/* --- Helper functions used in FM interrupt handlers --- */
static inline int check_cmdresp_status(struct fmdev *fmdev,
				       struct sk_buff **skb)
{
	struct fm_event_msg_hdr *fm_evt_hdr;
	unsigned long flags;

	del_timer(&fmdev->irq_info.timer);

	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	*skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	fm_evt_hdr = (void *)(*skb)->data;
	if (fm_evt_hdr->status != 0) {
		fmerr("irq: opcode %x response status is not zero, initiating irq recovery process\n",
		      fm_evt_hdr->op);

		mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
		return -1;
	}

	return 0;
}

static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
{
	struct sk_buff *skb;

	if (!check_cmdresp_status(fmdev, &skb))
		fm_irq_call_stage(fmdev, stage);
}

/*
 * Interrupt process timeout handler.
 * One of the irq handlers did not get a proper response from the chip, so
 * take recovery action here. FM interrupts are disabled at the beginning of
 * interrupt processing, therefore reset the stage index to re-enable the
 * default interrupts, so that the next interrupt will be processed as usual.
 */
static void int_timeout_handler(struct timer_list *t)
{
	struct fmdev *fmdev;
	struct fm_irq *fmirq;

	fmdbg("irq: timeout, trying to re-enable fm interrupts\n");
	fmdev = from_timer(fmdev, t, irq_info.timer);
	fmirq = &fmdev->irq_info;
	fmirq->retry++;

	if (fmirq->retry > FM_IRQ_TIMEOUT_RETRY_MAX) {
		/* Stop recovery action (interrupt reenable process) and
		 * reset stage index & retry count values */
		fmirq->stage = 0;
		fmirq->retry = 0;
		fmerr("Recovery action failed during irq processing, max retry reached\n");
		return;
	}
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}

/* --------- FM interrupt handlers ------------ */
static void fm_irq_send_flag_getcmd(struct fmdev *fmdev)
{
	u16 flag;

	/* Send FLAG_GET command to know the source of the interrupt */
	if (!fm_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(flag), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_FLAG_GETCMD_RESP_IDX);
}

static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *fm_evt_hdr;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fm_evt_hdr = (void *)skb->data;
	if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);

	fmdev->irq_info.flag = be16_to_cpu((__force __be16)fmdev->irq_info.flag);
	fmdbg("irq: flag register(0x%x)\n", fmdev->irq_info.flag);

	/* Continue next function in interrupt handler table */
	fm_irq_call_stage(fmdev, FM_HW_MAL_FUNC_IDX);
}

static void fm_irq_handle_hw_malfunction(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_MAL_EVENT & fmdev->irq_info.mask)
		fmerr("irq: HW MAL int received - do nothing\n");

	/* Continue next function in interrupt handler table */
	fm_irq_call_stage(fmdev, FM_RDS_START_IDX);
}

static void fm_irq_handle_rds_start(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_RDS_EVENT & fmdev->irq_info.mask) {
		fmdbg("irq: rds threshold reached\n");
		fmdev->irq_info.stage = FM_RDS_SEND_RDS_GETCMD_IDX;
	} else {
		/* Continue next function in interrupt handler table */
		fmdev->irq_info.stage = FM_HW_TUNE_OP_ENDED_IDX;
	}

	fm_irq_call(fmdev);
}

static void fm_irq_send_rdsdata_getcmd(struct fmdev *fmdev)
{
	/* Send the command to read RDS data from the chip */
	if (!fm_send_cmd(fmdev, RDS_DATA_GET, REG_RD, NULL,
			 (FM_RX_RDS_FIFO_THRESHOLD * 3), NULL))
		fm_irq_timeout_stage(fmdev, FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX);
}

/* Keeps track of current RX channel AF (Alternate Frequency) */
static void fm_rx_update_af_cache(struct fmdev *fmdev, u8 af)
{
	struct tuned_station_info *stat_info = &fmdev->rx.stat_info;
	u8 reg_idx = fmdev->rx.region.fm_band;
	u8 index;
	u32 freq;

	/* The first AF indicates the number of AFs that follow. Reset the list */
	if ((af >= FM_RDS_1_AF_FOLLOWS) && (af <= FM_RDS_25_AF_FOLLOWS)) {
		fmdev->rx.stat_info.af_list_max = (af - FM_RDS_1_AF_FOLLOWS + 1);
		fmdev->rx.stat_info.afcache_size = 0;
		fmdbg("No of expected AF : %d\n", fmdev->rx.stat_info.af_list_max);
		return;
	}

	if (af < FM_RDS_MIN_AF)
		return;
	if (reg_idx == FM_BAND_EUROPE_US && af > FM_RDS_MAX_AF)
		return;
	if (reg_idx == FM_BAND_JAPAN && af > FM_RDS_MAX_AF_JAPAN)
		return;

	freq = fmdev->rx.region.bot_freq + (af * 100);
	if (freq == fmdev->rx.freq) {
		fmdbg("Current freq(%d) matches received AF(%d)\n",
		      fmdev->rx.freq, freq);
		return;
	}
	/* Do check in AF cache */
	for (index = 0; index < stat_info->afcache_size; index++) {
		if (stat_info->af_cache[index] == freq)
			break;
	}
	/* Reached the limit of the list - ignore the next AF */
	if (index == stat_info->af_list_max) {
		fmdbg("AF cache is full\n");
		return;
	}
	/*
	 * If we reached the end of the list then this AF is not
	 * in the list - add it.
	 */
	if (index == stat_info->afcache_size) {
		fmdbg("Storing AF %d to cache index %d\n", freq, index);
		stat_info->af_cache[index] = freq;
		stat_info->afcache_size++;
	}
}
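
/*
 * AF codes map linearly onto the band: carrier frequency (in the kHz units
 * used throughout this driver) = region bot_freq + af * 100, with the valid
 * code range bounded by FM_RDS_MIN_AF and the per-band FM_RDS_MAX_AF* limits
 * checked above.
 */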

/*
 * Converts RDS buffer data from big endian format
 * to little endian format.
 */
static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
				  struct fm_rdsdata_format *rds_format)
{
	u8 index = 0;
	u8 *rds_buff;

	/*
	 * Since in Orca the 2 RDS Data bytes are in little endian and
	 * in Dolphin they are in big endian, the parsing of the RDS data
	 * is chip dependent
	 */
	if (fmdev->asci_id != 0x6350) {
		rds_buff = &rds_format->data.groupdatabuff.buff[0];
		while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
			swap(rds_buff[index], rds_buff[index + 1]);
			index += 2;
		}
	}
}

static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_rdsdata_format rds_fmt;
	struct fm_rds *rds = &fmdev->rx.rds;
	unsigned long group_idx, flags;
	u8 *rds_data, meta_data, tmpbuf[FM_RDS_BLK_SIZE];
	u8 type, blk_idx;
	u16 cur_picode;
	u32 rds_len;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	rds_data = skb->data;
	rds_len = skb->len;

	/* Parse the RDS data */
	while (rds_len >= FM_RDS_BLK_SIZE) {
		meta_data = rds_data[2];
		/* Get the type: 0=A, 1=B, 2=C, 3=C', 4=D, 5=E */
		type = (meta_data & 0x07);

		/* Transform the blk type into index sequence (0, 1, 2, 3, 4) */
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		fmdbg("Block index:%d(%s)\n", blk_idx,
		      (meta_data & FM_RDS_STATUS_ERR_MASK) ? "Bad" : "Ok");

		if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
			break;

		if (blk_idx > FM_RDS_BLK_IDX_D) {
			fmdbg("Block sequence mismatch\n");
			rds->last_blk_idx = -1;
			break;
		}

		/* Skip checkword (control) byte and copy only data byte */
		memcpy(&rds_fmt.data.groupdatabuff.
		       buff[blk_idx * (FM_RDS_BLK_SIZE - 1)],
		       rds_data, (FM_RDS_BLK_SIZE - 1));

		rds->last_blk_idx = blk_idx;

		/* If completed a whole group then handle it */
		if (blk_idx == FM_RDS_BLK_IDX_D) {
			fmdbg("Good block received\n");
			fm_rdsparse_swapbytes(fmdev, &rds_fmt);

			/*
			 * Extract PI code and store in local cache.
			 * We need this during AF switch processing.
			 */
			cur_picode = be16_to_cpu((__force __be16)rds_fmt.data.groupgeneral.pidata);
			if (fmdev->rx.stat_info.picode != cur_picode)
				fmdev->rx.stat_info.picode = cur_picode;

			fmdbg("picode:%d\n", cur_picode);

			group_idx = (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			fmdbg("(fmdrv):Group:%ld%s\n", group_idx/2,
			      (group_idx % 2) ? "B" : "A");

			group_idx = 1 << (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			if (group_idx == FM_RDS_GROUP_TYPE_MASK_0A) {
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[0]);
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[1]);
			}
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}

	/* Copy raw rds data to internal rds buffer */
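	/*
	 * Each entry written below is one 3-byte block in the layout the
	 * V4L2 RDS read interface expects: bytes 0-1 carry the RDS block
	 * data as received from the chip, byte 2 carries the block id in
	 * bits 0-2 (offset name) and bits 3-5 (received offset).
	 */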
	rds_data = skb->data;
	rds_len = skb->len;

	spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
	while (rds_len > 0) {
		/*
		 * Fill RDS buffer as per V4L2 specification.
		 * Store control byte
		 */
		type = (rds_data[2] & 0x07);
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		tmpbuf[2] = blk_idx;	/* Offset name */
		tmpbuf[2] |= blk_idx << 3;	/* Received offset */

		/* Store data byte */
		tmpbuf[0] = rds_data[0];
		tmpbuf[1] = rds_data[1];

		memcpy(&rds->buff[rds->wr_idx], &tmpbuf, FM_RDS_BLK_SIZE);
		rds->wr_idx = (rds->wr_idx + FM_RDS_BLK_SIZE) % rds->buf_size;

		/* Check for overflow & start over */
		if (rds->wr_idx == rds->rd_idx) {
			fmdbg("RDS buffer overflow\n");
			rds->wr_idx = 0;
			rds->rd_idx = 0;
			break;
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}
	spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

	/* Wakeup read queue */
	if (rds->wr_idx != rds->rd_idx)
		wake_up_interruptible(&rds->read_queue);

	fm_irq_call_stage(fmdev, FM_RDS_FINISH_IDX);
}

static void fm_irq_handle_rds_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_HW_TUNE_OP_ENDED_IDX);
}

static void fm_irq_handle_tune_op_ended(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & (FM_FR_EVENT | FM_BL_EVENT) &
	    fmdev->irq_info.mask) {
		fmdbg("irq: tune ended/bandlimit reached\n");
		if (test_and_clear_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag)) {
			fmdev->irq_info.stage = FM_AF_JUMP_RD_FREQ_IDX;
		} else {
			complete(&fmdev->maintask_comp);
			fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
		}
	} else
		fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;

	fm_irq_call(fmdev);
}

static void fm_irq_handle_power_enb(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_POW_ENB_EVENT) {
		fmdbg("irq: Power Enabled/Disabled\n");
		complete(&fmdev->maintask_comp);
	}

	fm_irq_call_stage(fmdev, FM_LOW_RSSI_START_IDX);
}

static void fm_irq_handle_low_rssi_start(struct fmdev *fmdev)
{
	if ((fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON) &&
	    (fmdev->irq_info.flag & FM_LEV_EVENT & fmdev->irq_info.mask) &&
	    (fmdev->rx.freq != FM_UNDEFINED_FREQ) &&
	    (fmdev->rx.stat_info.afcache_size != 0)) {
		fmdbg("irq: rssi level has fallen below threshold level\n");

		/* Disable further low RSSI interrupts */
		fmdev->irq_info.mask &= ~FM_LEV_EVENT;
		fmdev->rx.afjump_idx = 0;
		fmdev->rx.freq_before_jump = fmdev->rx.freq;
		fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
	} else {
		/* Continue next function in interrupt handler table */
		fmdev->irq_info.stage = FM_SEND_INTMSK_CMD_IDX;
	}

	fm_irq_call(fmdev);
}

static void fm_irq_afjump_set_pi(struct fmdev *fmdev)
{
	u16 payload;

	/* Set PI code - must be updated if the AF list is not empty */
	payload = fmdev->rx.stat_info.picode;
	if (!fm_send_cmd(fmdev, RDS_PI_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_RESP_IDX);
}

static void fm_irq_handle_set_pi_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SETPI_MASK_IDX);
}

/*
 * Set PI mask.
 * 0xFFFF = Enable PI code matching
 * 0x0000 = Disable PI code matching
 */
static void fm_irq_afjump_set_pimask(struct fmdev *fmdev)
{
	u16 payload;

	payload = 0x0000;
	if (!fm_send_cmd(fmdev, RDS_PI_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX);
}

static void fm_irq_handle_set_pimask_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SET_AF_FREQ_IDX);
}

static void fm_irq_afjump_setfreq(struct fmdev *fmdev)
{
	u16 frq_index;
	u16 payload;

	fmdbg("Switch to %d KHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
	frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] -
		     fmdev->rx.region.bot_freq) / FM_FREQ_MUL;

	payload = frq_index;
	if (!fm_send_cmd(fmdev, AF_FREQ_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX);
}

static void fm_irq_handle_setfreq_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_ENABLE_INT_IDX);
}

static void fm_irq_afjump_enableint(struct fmdev *fmdev)
{
	u16 payload;

	/* Enable FR (tuning operation ended) interrupt */
	payload = FM_FR_EVENT;
	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_ENABLE_INT_RESP_IDX);
}

static void fm_irq_afjump_enableint_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_START_AFJUMP_IDX);
}

static void fm_irq_start_afjump(struct fmdev *fmdev)
{
	u16 payload;

	payload = FM_TUNER_AF_JUMP_MODE;
	if (!fm_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
			 sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX);
}

static void fm_irq_handle_start_afjump_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
	set_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag);
	clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}

static void fm_irq_afjump_rd_freq(struct fmdev *fmdev)
{
	u16 payload;

	if (!fm_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_RD_FREQ_RESP_IDX);
}

static void fm_irq_afjump_rd_freq_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	u16 read_freq;
	u32 curr_freq, jumped_freq;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&read_freq, skb->data, sizeof(read_freq));
	read_freq = be16_to_cpu((__force __be16)read_freq);
	curr_freq = fmdev->rx.region.bot_freq + ((u32)read_freq * FM_FREQ_MUL);

	jumped_freq = fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx];

	/* If the frequency was changed, the jump succeeded */
	if ((curr_freq != fmdev->rx.freq_before_jump) && (curr_freq == jumped_freq)) {
		fmdbg("Successfully switched to alternate freq %d\n", curr_freq);
		fmdev->rx.freq = curr_freq;
		fm_rx_reset_rds_cache(fmdev);

		/* AF feature is on, enable low level RSSI interrupt */
		if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
			fmdev->irq_info.mask |= FM_LEV_EVENT;

		fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
	} else {		/* Jump to the next freq in the AF list */
		fmdev->rx.afjump_idx++;

		/* If we reached the end of the list - stop searching */
		if (fmdev->rx.afjump_idx >= fmdev->rx.stat_info.afcache_size) {
			fmdbg("AF switch processing failed\n");
			fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
		} else {	/* AF list is not over - try the next one */
			fmdbg("Trying next freq in AF cache\n");
			fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
		}
	}
	fm_irq_call(fmdev);
}

static void fm_irq_handle_low_rssi_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}

static void fm_irq_send_intmsk_cmd(struct fmdev *fmdev)
{
	u16 payload;

	/* Re-enable FM interrupts */
	payload = fmdev->irq_info.mask;

	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			 sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_INTMSK_CMD_RESP_IDX);
}

static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;
	/*
	 * This is the last function in the interrupt handler table to be
	 * executed, so reset the stage index to 0.
	 */
	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;

	/* Start processing any pending interrupt */
	if (test_and_clear_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag))
		fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
	else
		clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}

/* Returns availability of RDS data in the internal buffer */
int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
			      struct poll_table_struct *pts)
{
	poll_wait(file, &fmdev->rx.rds.read_queue, pts);
	if (fmdev->rx.rds.rd_idx != fmdev->rx.rds.wr_idx)
		return 0;

	return -EAGAIN;
}

/* Copies RDS data from internal buffer to user buffer */
int fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
					u8 __user *buf, size_t count)
{
	u32 block_count;
	u8 tmpbuf[FM_RDS_BLK_SIZE];
	unsigned long flags;
	int ret;

	if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		ret = wait_event_interruptible(fmdev->rx.rds.read_queue,
					       (fmdev->rx.rds.wr_idx != fmdev->rx.rds.rd_idx));
		if (ret)
			return -EINTR;
	}

	/* Calculate block count from byte count */
	count /= FM_RDS_BLK_SIZE;
	block_count = 0;
	ret = 0;

	while (block_count < count) {
		spin_lock_irqsave(&fmdev->rds_buff_lock, flags);

		if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
			spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
			break;
		}
		memcpy(tmpbuf, &fmdev->rx.rds.buff[fmdev->rx.rds.rd_idx],
		       FM_RDS_BLK_SIZE);
		fmdev->rx.rds.rd_idx += FM_RDS_BLK_SIZE;
		if (fmdev->rx.rds.rd_idx >= fmdev->rx.rds.buf_size)
			fmdev->rx.rds.rd_idx = 0;

		spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

		if (copy_to_user(buf, tmpbuf, FM_RDS_BLK_SIZE))
			break;

		block_count++;
		buf += FM_RDS_BLK_SIZE;
		ret += FM_RDS_BLK_SIZE;
	}

	return ret;
}
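
/*
 * Userspace consumption sketch (assumed usage via the V4L2 radio node this
 * driver registers, e.g. /dev/radio0): each read() returns whole 3-byte RDS
 * blocks, so a buffer size that is a multiple of FM_RDS_BLK_SIZE is natural.
 *
 *	unsigned char blk[3];
 *	ssize_t n = read(radio_fd, blk, sizeof(blk));
 *	// n == 3 on success; blk[2] & 0x07 gives the block index
 */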

int fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_freq(fmdev, freq_to_set);

	case FM_MODE_TX:
		return fm_tx_set_freq(fmdev, freq_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
{
	if (fmdev->rx.freq == FM_UNDEFINED_FREQ) {
		fmerr("RX frequency is not set\n");
		return -EPERM;
	}
	if (cur_tuned_frq == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		*cur_tuned_frq = fmdev->rx.freq;
		return 0;

	case FM_MODE_TX:
		*cur_tuned_frq = 0;	/* TODO : Change this later */
		return 0;

	default:
		return -EINVAL;
	}
}

int fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_region(fmdev, region_to_set);

	case FM_MODE_TX:
		return fm_tx_set_region(fmdev, region_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_mute_mode(fmdev, mute_mode_toset);

	case FM_MODE_TX:
		return fm_tx_set_mute_mode(fmdev, mute_mode_toset);

	default:
		return -EINVAL;
	}
}

int fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_stereo_mono(fmdev, mode);

	case FM_MODE_TX:
		return fm_tx_set_stereo_mono(fmdev, mode);

	default:
		return -EINVAL;
	}
}

int fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_rds_mode(fmdev, rds_en_dis);

	case FM_MODE_TX:
		return fm_tx_set_rds_mode(fmdev, rds_en_dis);

	default:
		return -EINVAL;
	}
}

/* Sends power off command to the chip */
static int fm_power_down(struct fmdev *fmdev)
{
	u16 payload;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmdev->curr_fmmode == FM_MODE_OFF) {
		fmdbg("FM chip is already in OFF state\n");
		return 0;
	}

	payload = 0x0;
	ret = fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
			   sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return fmc_release(fmdev);
}

/* Reads init commands from FM firmware file and loads them to the chip */
static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
{
	const struct firmware *fw_entry;
	struct bts_header *fw_header;
	struct bts_action *action;
	struct bts_action_delay *delay;
	u8 *fw_data;
	int ret, fw_len, cmd_cnt;

	cmd_cnt = 0;
	set_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	ret = request_firmware(&fw_entry, fw_name,
			       &fmdev->radio_dev->dev);
	if (ret)
		return ret;

	fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);

	fw_data = (void *)fw_entry->data;
	fw_len = fw_entry->size;

	fw_header = (struct bts_header *)fw_data;
	if (fw_header->magic != FM_FW_FILE_HEADER_MAGIC) {
		fmerr("%s not a legal TI firmware file\n", fw_name);
		ret = -EINVAL;
		goto rel_fw;
	}
	fmdbg("FW(%s) magic number : 0x%x\n", fw_name, fw_header->magic);

	/* Skip file header info, we already verified it */
	fw_data += sizeof(struct bts_header);
	fw_len -= sizeof(struct bts_header);

	while (fw_data && fw_len > 0) {
		action = (struct bts_action *)fw_data;

		switch (action->type) {
		case ACTION_SEND_COMMAND:	/* Send */
			ret = fmc_send_cmd(fmdev, 0, 0, action->data,
					   action->size, NULL, NULL);
			if (ret)
				goto rel_fw;

			cmd_cnt++;
			break;

		case ACTION_DELAY:	/* Delay */
			delay = (struct bts_action_delay *)action->data;
			mdelay(delay->msec);
			break;
		}

		fw_data += (sizeof(struct bts_action) + (action->size));
		fw_len -= (sizeof(struct bts_action) + (action->size));
	}
	fmdbg("Firmware commands(%d) loaded to chip\n", cmd_cnt);
rel_fw:
	release_firmware(fw_entry);
	clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	return ret;
}
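
/*
 * The .bts firmware layout consumed above: a struct bts_header (checked via
 * FM_FW_FILE_HEADER_MAGIC) followed by a stream of struct bts_action records
 * (type, size, data). ACTION_SEND_COMMAND payloads are forwarded verbatim as
 * Channel-8 commands (fm_send_cmd() skips header filling while
 * FM_FW_DW_INPROGRESS is set), and ACTION_DELAY records become mdelay()s.
 */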

/* Loads default RX configuration to the chip */
static int load_default_rx_configuration(struct fmdev *fmdev)
{
	int ret;

	ret = fm_rx_set_volume(fmdev, FM_DEFAULT_RX_VOLUME);
	if (ret < 0)
		return ret;

	return fm_rx_set_rssi_threshold(fmdev, FM_DEFAULT_RSSI_THRESHOLD);
}

/* Does FM power on sequence */
static int fm_power_up(struct fmdev *fmdev, u8 mode)
{
	u16 payload;
	__be16 asic_id = 0, asic_ver = 0;
	int resp_len, ret;
	u8 fw_name[50];

	if (mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid firmware download option\n");
		return -EINVAL;
	}

	/*
	 * Initialize FM common module. FM GPIO toggling is
	 * taken care of in the Shared Transport driver.
	 */
	ret = fmc_prepare(fmdev);
	if (ret < 0) {
		fmerr("Unable to prepare FM Common\n");
		return ret;
	}

	payload = FM_ENABLE;
	if (fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
			 sizeof(payload), NULL, NULL))
		goto rel;

	/* Allow the chip to settle down in Channel-8 mode */
	msleep(20);

	if (fmc_send_cmd(fmdev, ASIC_ID_GET, REG_RD, NULL,
			 sizeof(asic_id), &asic_id, &resp_len))
		goto rel;
	if (fmc_send_cmd(fmdev, ASIC_VER_GET, REG_RD, NULL,
			 sizeof(asic_ver), &asic_ver, &resp_len))
		goto rel;

	fmdbg("ASIC ID: 0x%x , ASIC Version: %d\n",
	      be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	sprintf(fw_name, "%s_%x.%d.bts", FM_FMC_FW_FILE_START,
		be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	}

	sprintf(fw_name, "%s_%x.%d.bts", (mode == FM_MODE_RX) ?
		FM_RX_FW_FILE_START : FM_TX_FW_FILE_START,
		be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	} else
		return ret;

rel:
	return fmc_release(fmdev);
}
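
/*
 * Firmware file names are derived from the probed ASIC id/version using the
 * FM_FMC/RX/TX_FW_FILE_START prefixes, i.e. something of the form
 * <prefix>_<asic_id>.<asic_ver>.bts; one common image is loaded first, then
 * the RX- or TX-specific image for the requested mode.
 */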

/* Set FM Modes (TX, RX, OFF) */
int fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
{
	int ret = 0;

	if (fm_mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid FM mode\n");
		return -EINVAL;
	}
	if (fmdev->curr_fmmode == fm_mode) {
		fmdbg("FM is already in mode(%d)\n", fm_mode);
		return ret;
	}

	switch (fm_mode) {
	case FM_MODE_OFF:	/* OFF Mode */
		ret = fm_power_down(fmdev);
		if (ret < 0) {
			fmerr("Failed to set OFF mode\n");
			return ret;
		}
		break;

	case FM_MODE_TX:	/* TX Mode */
	case FM_MODE_RX:	/* RX Mode */
		/* Power down before switching to TX or RX mode */
		if (fmdev->curr_fmmode != FM_MODE_OFF) {
			ret = fm_power_down(fmdev);
			if (ret < 0) {
				fmerr("Failed to set OFF mode\n");
				return ret;
			}
			msleep(30);
		}
		ret = fm_power_up(fmdev, fm_mode);
		if (ret < 0) {
			fmerr("Failed to load firmware\n");
			return ret;
		}
	}
	fmdev->curr_fmmode = fm_mode;

	/* Set default configuration */
	if (fmdev->curr_fmmode == FM_MODE_RX) {
		fmdbg("Loading default rx configuration..\n");
		ret = load_default_rx_configuration(fmdev);
		if (ret < 0)
			fmerr("Failed to load default values\n");
	}

	return ret;
}

/* Returns current FM mode (TX, RX, OFF) */
int fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
{
	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmmode == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	*fmmode = fmdev->curr_fmmode;
	return 0;
}

/* Called by ST layer when FM packet is available */
static long fm_st_receive(void *arg, struct sk_buff *skb)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;

	if (skb == NULL) {
		fmerr("Invalid SKB received from ST\n");
		return -EFAULT;
	}
	if (skb->cb[0] != FM_PKT_LOGICAL_CHAN_NUMBER) {
		fmerr("Received SKB (%p) is not FM Channel 8 pkt\n", skb);
		return -EINVAL;
	}

	memcpy(skb_push(skb, 1), &skb->cb[0], 1);
	skb_queue_tail(&fmdev->rx_q, skb);
	tasklet_schedule(&fmdev->rx_task);

	return 0;
}

/*
 * Called by ST layer to indicate protocol registration completion
 * status.
 */
static void fm_st_reg_comp_cb(void *arg, int data)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;
	fmdev->streg_cbdata = data;
	complete(&wait_for_fmdrv_reg_comp);
}

/*
 * This function will be called from FM V4L2 open function.
 * Register with ST driver and initialize driver data.
 */
int fmc_prepare(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already up\n");
		return 0;
	}

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.recv = fm_st_receive;
	fm_st_proto.match_packet = NULL;
	fm_st_proto.reg_complete_cb = fm_st_reg_comp_cb;
	fm_st_proto.write = NULL; /* TI ST driver will fill write pointer */
	fm_st_proto.priv_data = fmdev;
	fm_st_proto.chnl_id = 0x08;
	fm_st_proto.max_frame_size = 0xff;
	fm_st_proto.hdr_len = 1;
	fm_st_proto.offset_len_in_hdr = 0;
	fm_st_proto.len_size = 1;
	fm_st_proto.reserve = 1;

	ret = st_register(&fm_st_proto);
	if (ret == -EINPROGRESS) {
		init_completion(&wait_for_fmdrv_reg_comp);
		fmdev->streg_cbdata = -EINPROGRESS;
		fmdbg("%s waiting for ST reg completion signal\n", __func__);

		if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
						 FM_ST_REG_TIMEOUT)) {
			fmerr("Timeout(%d sec), didn't get reg completion signal from ST\n",
			      jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
			return -ETIMEDOUT;
		}
		if (fmdev->streg_cbdata != 0) {
			fmerr("ST reg comp CB called with error status %d\n",
			      fmdev->streg_cbdata);
			return -EAGAIN;
		}

		ret = 0;
	} else if (ret == -1) {
		fmerr("st_register failed %d\n", ret);
		return -EAGAIN;
	}

	if (fm_st_proto.write != NULL) {
		g_st_write = fm_st_proto.write;
	} else {
		fmerr("Failed to get ST write func pointer\n");
		ret = st_unregister(&fm_st_proto);
		if (ret < 0)
			fmerr("st_unregister failed %d\n", ret);
		return -EAGAIN;
	}

	spin_lock_init(&fmdev->rds_buff_lock);
	spin_lock_init(&fmdev->resp_skb_lock);

	/* Initialize TX queue and TX tasklet */
	skb_queue_head_init(&fmdev->tx_q);
	tasklet_init(&fmdev->tx_task, send_tasklet, (unsigned long)fmdev);

	/* Initialize RX Queue and RX tasklet */
	skb_queue_head_init(&fmdev->rx_q);
	tasklet_init(&fmdev->rx_task, recv_tasklet, (unsigned long)fmdev);

	fmdev->irq_info.stage = 0;
	atomic_set(&fmdev->tx_cnt, 1);
	fmdev->resp_comp = NULL;

	timer_setup(&fmdev->irq_info.timer, int_timeout_handler, 0);
	/* TODO: add FM_STIC_EVENT later */
	fmdev->irq_info.mask = FM_MAL_EVENT;

	/* Region info */
	fmdev->rx.region = region_configs[default_radio_region];

	fmdev->rx.mute_mode = FM_MUTE_OFF;
	fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
	fmdev->rx.rds.flag = FM_RDS_DISABLE;
	fmdev->rx.freq = FM_UNDEFINED_FREQ;
	fmdev->rx.rds_mode = FM_RDS_SYSTEM_RDS;
	fmdev->rx.af_mode = FM_RX_RDS_AF_SWITCH_MODE_OFF;
	fmdev->irq_info.retry = 0;

	fm_rx_reset_rds_cache(fmdev);
	init_waitqueue_head(&fmdev->rx.rds.read_queue);

	fm_rx_reset_station_info(fmdev);
	set_bit(FM_CORE_READY, &fmdev->flag);

	return ret;
}
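
/*
 * Note on the st_register() handshake above: the ST driver may complete the
 * registration asynchronously (-EINPROGRESS), in which case fmc_prepare()
 * blocks on wait_for_fmdrv_reg_comp until fm_st_reg_comp_cb() reports the
 * final status; only then is the ST-provided write hook cached in
 * g_st_write for use by send_tasklet().
 */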

/*
 * This function will be called from FM V4L2 release function.
 * Unregister from ST driver.
 */
int fmc_release(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already down\n");
		return 0;
	}
	/* Service pending read */
	wake_up_interruptible(&fmdev->rx.rds.read_queue);

	tasklet_kill(&fmdev->tx_task);
	tasklet_kill(&fmdev->rx_task);

	skb_queue_purge(&fmdev->tx_q);
	skb_queue_purge(&fmdev->rx_q);

	fmdev->resp_comp = NULL;
	fmdev->rx.freq = 0;

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.chnl_id = 0x08;

	ret = st_unregister(&fm_st_proto);

	if (ret < 0)
		fmerr("Failed to de-register FM from ST %d\n", ret);
	else
		fmdbg("Successfully unregistered from ST\n");

	clear_bit(FM_CORE_READY, &fmdev->flag);
	return ret;
}

/*
 * Module init function. Ask FM V4L module to register video device.
 * Allocate memory for FM driver context and RX RDS buffer.
 */
static int __init fm_drv_init(void)
{
	struct fmdev *fmdev = NULL;
	int ret = -ENOMEM;

	fmdbg("FM driver version %s\n", FM_DRV_VERSION);

	fmdev = kzalloc(sizeof(struct fmdev), GFP_KERNEL);
	if (NULL == fmdev) {
		fmerr("Can't allocate operation structure memory\n");
		return ret;
	}
	fmdev->rx.rds.buf_size = default_rds_buf * FM_RDS_BLK_SIZE;
	fmdev->rx.rds.buff = kzalloc(fmdev->rx.rds.buf_size, GFP_KERNEL);
	if (NULL == fmdev->rx.rds.buff) {
		fmerr("Can't allocate rds ring buffer\n");
		goto rel_dev;
	}

	ret = fm_v4l2_init_video_device(fmdev, radio_nr);
	if (ret < 0)
		goto rel_rdsbuf;

	fmdev->irq_info.handlers = int_handler_table;
	fmdev->curr_fmmode = FM_MODE_OFF;
	fmdev->tx_data.pwr_lvl = FM_PWR_LVL_DEF;
	fmdev->tx_data.preemph = FM_TX_PREEMPH_50US;
	return ret;

rel_rdsbuf:
	kfree(fmdev->rx.rds.buff);
rel_dev:
	kfree(fmdev);

	return ret;
}

/* Module exit function. Ask FM V4L module to unregister video device */
static void __exit fm_drv_exit(void)
{
	struct fmdev *fmdev = NULL;

	fmdev = fm_v4l2_deinit_video_device();
	if (fmdev != NULL) {
		kfree(fmdev->rx.rds.buff);
		kfree(fmdev);
	}
}

module_init(fm_drv_init);
module_exit(fm_drv_exit);

/* ------------- Module Info ------------- */
MODULE_AUTHOR("Manjunatha Halli <manjunatha_halli@ti.com>");
MODULE_DESCRIPTION("FM Driver for TI's Connectivity chip. " FM_DRV_VERSION);
MODULE_VERSION(FM_DRV_VERSION);
MODULE_LICENSE("GPL");