lx_core.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357
  1. /* -*- linux-c -*- *
  2. *
  3. * ALSA driver for the digigram lx6464es interface
  4. * low-level interface
  5. *
  6. * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; see the file COPYING. If not, write to
  20. * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  21. * Boston, MA 02111-1307, USA.
  22. *
  23. */
  24. /* #define RMH_DEBUG 1 */
  25. #include <linux/module.h>
  26. #include <linux/pci.h>
  27. #include <linux/delay.h>
  28. #include "lx6464es.h"
  29. #include "lx_core.h"
  30. /* low-level register access */
/* Byte-offset/4 of each DSP register inside the DSP BAR, indexed by the
 * eReg_* port values used throughout this file; lx_dsp_register() scales
 * each entry by 4 to obtain the byte offset. */
static const unsigned long dsp_port_offsets[] = {
	0,
	0x400,
	0x401,
	0x402,
	0x403,
	0x404,
	0x405,
	0x406,
	0x407,
	0x408,
	0x409,
	0x40a,
	0x40b,
	0x40c,
	0x410,
	0x411,
	0x412,
	0x413,
	0x414,
	0x415,
	0x416,
	0x420,
	0x430,
	0x431,
	0x432,
	0x433,
	0x434,
	0x440
};
  61. static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
  62. {
  63. void __iomem *base_address = chip->port_dsp_bar;
  64. return base_address + dsp_port_offsets[port]*4;
  65. }
  66. unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
  67. {
  68. void __iomem *address = lx_dsp_register(chip, port);
  69. return ioread32(address);
  70. }
  71. void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len)
  72. {
  73. u32 __iomem *address = lx_dsp_register(chip, port);
  74. int i;
  75. /* we cannot use memcpy_fromio */
  76. for (i = 0; i != len; ++i)
  77. data[i] = ioread32(address + i);
  78. }
  79. void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
  80. {
  81. void __iomem *address = lx_dsp_register(chip, port);
  82. iowrite32(data, address);
  83. }
  84. void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
  85. u32 len)
  86. {
  87. u32 __iomem *address = lx_dsp_register(chip, port);
  88. int i;
  89. /* we cannot use memcpy_to */
  90. for (i = 0; i != len; ++i)
  91. iowrite32(data[i], address + i);
  92. }
/* Byte offset of each PLX bridge register, indexed by the ePLX_* port
 * values (mailboxes, IRQ control, ...); used by lx_plx_register(). */
static const unsigned long plx_port_offsets[] = {
	0x04,
	0x40,
	0x44,
	0x48,
	0x4c,
	0x50,
	0x54,
	0x58,
	0x5c,
	0x64,
	0x68,
	0x6C
};
  107. static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
  108. {
  109. void __iomem *base_address = chip->port_plx_remapped;
  110. return base_address + plx_port_offsets[port];
  111. }
  112. unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
  113. {
  114. void __iomem *address = lx_plx_register(chip, port);
  115. return ioread32(address);
  116. }
  117. void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
  118. {
  119. void __iomem *address = lx_plx_register(chip, port);
  120. iowrite32(data, address);
  121. }
/* Read PLX mailbox @mbox_nr (1..7).  Mailbox 0 is reserved for the HF
 * flags and must not be read this way; for it (and any other invalid
 * number) the function returns the 0xdeadbeef sentinel. */
u32 lx_plx_mbox_read(struct lx6464es *chip, int mbox_nr)
{
	int index;

	switch (mbox_nr) {
	case 1:
		index = ePLX_MBOX1; break;
	case 2:
		index = ePLX_MBOX2; break;
	case 3:
		index = ePLX_MBOX3; break;
	case 4:
		index = ePLX_MBOX4; break;
	case 5:
		index = ePLX_MBOX5; break;
	case 6:
		index = ePLX_MBOX6; break;
	case 7:
		index = ePLX_MBOX7; break;
	case 0: /* reserved for HF flags */
		snd_BUG();
		/* fall through */
	default:
		return 0xdeadbeef;
	}

	return lx_plx_reg_read(chip, index);
}
  147. int lx_plx_mbox_write(struct lx6464es *chip, int mbox_nr, u32 value)
  148. {
  149. int index = -1;
  150. switch (mbox_nr) {
  151. case 1:
  152. index = ePLX_MBOX1; break;
  153. case 3:
  154. index = ePLX_MBOX3; break;
  155. case 4:
  156. index = ePLX_MBOX4; break;
  157. case 5:
  158. index = ePLX_MBOX5; break;
  159. case 6:
  160. index = ePLX_MBOX6; break;
  161. case 7:
  162. index = ePLX_MBOX7; break;
  163. case 0: /* reserved for HF flags */
  164. case 2: /* reserved for Pipe States
  165. * the DSP keeps an image of it */
  166. snd_BUG();
  167. return -EBADRQC;
  168. }
  169. lx_plx_reg_write(chip, index, value);
  170. return 0;
  171. }
  172. /* rmh */
/* Command names are only kept around for debug printing. */
#ifdef CONFIG_SND_DEBUG
#define CMD_NAME(a) a
#else
#define CMD_NAME(a) NULL
#endif

/* Bits of the eReg_CSM handshake register used by
 * lx_message_send_atomic(): MC kicks the microblaze, MR signals that
 * its answer is ready. */
#define Reg_CSM_MR	0x00000002
#define Reg_CSM_MC	0x00000001

/* Static description of one microblaze command; one entry per opcode
 * in the dsp_commands[] table below. */
struct dsp_cmd_info {
	u32 dcCodeOp;		/* Op Code of the command (usually 1st 24-bits
				 * word).*/
	u16 dcCmdLength;	/* Command length in words of 24 bits.*/
	u16 dcStatusType;	/* Status type: 0 for fixed length, 1 for
				 * random. */
	u16 dcStatusLength;	/* Status length (if fixed).*/
	char *dcOpName;
};
  189. /*
  190. Initialization and control data for the Microblaze interface
  191. - OpCode:
  192. the opcode field of the command set at the proper offset
  193. - CmdLength
  194. the number of command words
  195. - StatusType
  196. offset in the status registers: 0 means that the return value may be
  197. different from 0, and must be read
  198. - StatusLength
  199. the number of status words (in addition to the return value)
  200. */
/* One entry per CMD_* opcode, indexed by the enum value; consumed by
 * lx_message_init().  Fields: opcode word, command length, status type,
 * status length, debug name (see struct dsp_cmd_info above). */
static struct dsp_cmd_info dsp_commands[] =
{
	{ (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/
	  , 1 , 0 /**/		      , CMD_NAME("INFO_DEBUG") },
	{ (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/
	  , 1 , 2 /**/		      , CMD_NAME("GET_SYS_CFG") },
	{ (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/		      , CMD_NAME("SET_GRANULARITY") },
	{ (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/		      , CMD_NAME("SET_TIMER_IRQ") },
	{ (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /*up to 10*/	      , CMD_NAME("GET_EVENT") },
	{ (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/
	  , 1 , 2 /*up to 4*/	      , CMD_NAME("GET_PIPES") },
	{ (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /**/		      , CMD_NAME("ALLOCATE_PIPE") },
	{ (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /**/		      , CMD_NAME("RELEASE_PIPE") },
	{ (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/
	  , 1 , MAX_STREAM_BUFFER     , CMD_NAME("ASK_BUFFERS") },
	{ (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /*up to 2*/	      , CMD_NAME("STOP_PIPE") },
	{ (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /*up to 2*/	      , CMD_NAME("GET_PIPE_SPL_COUNT") },
	{ (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/
	  , 1 , 0 /**/		      , CMD_NAME("TOGGLE_PIPE_STATE") },
	{ (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/
	  , 1 , 0 /**/		      , CMD_NAME("DEF_STREAM") },
	{ (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/
	  , 1 , 0 /**/		      , CMD_NAME("SET_MUTE") },
	{ (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1/**/
	  , 1 , 2 /**/		      , CMD_NAME("GET_STREAM_SPL_COUNT") },
	{ (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/
	  , 0 , 1 /**/		      , CMD_NAME("UPDATE_BUFFER") },
	{ (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/
	  , 1 , 4 /**/		      , CMD_NAME("GET_BUFFER") },
	{ (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /*up to 4*/	      , CMD_NAME("CANCEL_BUFFER") },
	{ (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /**/		      , CMD_NAME("GET_PEAK") },
	{ (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/		      , CMD_NAME("SET_STREAM_STATE") },
};
  244. static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd)
  245. {
  246. snd_BUG_ON(cmd >= CMD_14_INVALID);
  247. rmh->cmd[0] = dsp_commands[cmd].dcCodeOp;
  248. rmh->cmd_len = dsp_commands[cmd].dcCmdLength;
  249. rmh->stat_len = dsp_commands[cmd].dcStatusLength;
  250. rmh->dsp_stat = dsp_commands[cmd].dcStatusType;
  251. rmh->cmd_idx = cmd;
  252. memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32));
  253. #ifdef CONFIG_SND_DEBUG
  254. memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32));
  255. #endif
  256. #ifdef RMH_DEBUG
  257. rmh->cmd_idx = cmd;
  258. #endif
  259. }
#ifdef RMH_DEBUG
#define LXRMH "lx6464es rmh: "
/* Dump the command and status words of @rmh to the kernel log
 * (RMH_DEBUG builds only). */
static void lx_message_dump(struct lx_rmh *rmh)
{
	u8 idx = rmh->cmd_idx;
	int i;

	snd_printk(LXRMH "command %s\n", dsp_commands[idx].dcOpName);

	for (i = 0; i != rmh->cmd_len; ++i)
		snd_printk(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]);

	for (i = 0; i != rmh->stat_len; ++i)
		snd_printk(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]);
	snd_printk("\n");
}
#else
/* No-op stub when RMH_DEBUG is disabled. */
static inline void lx_message_dump(struct lx_rmh *rmh)
{}
#endif
/* Poll budget for talking to the microblaze.
 * NOTE(review): the former comment ("sleep 500 - 100 = 400 times 100us
 * -> the timeout is >= 40 ms") does not match the NO_SLEEP/ITERATIONS
 * values below and looks stale; lx_message_send_atomic() only uses
 * XILINX_TIMEOUT_MS (40 ms of 1 us busy-wait polls) — confirm the other
 * two constants against their users outside this chunk. */
#define XILINX_TIMEOUT_MS 40
#define XILINX_POLL_NO_SLEEP 100
#define XILINX_POLL_ITERATIONS 150
  281. static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
  282. {
  283. u32 reg = ED_DSP_TIMED_OUT;
  284. int dwloop;
  285. if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
  286. snd_printk(KERN_ERR LXP "PIOSendMessage eReg_CSM %x\n", reg);
  287. return -EBUSY;
  288. }
  289. /* write command */
  290. lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);
  291. /* MicoBlaze gogogo */
  292. lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);
  293. /* wait for device to answer */
  294. for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
  295. if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
  296. if (rmh->dsp_stat == 0)
  297. reg = lx_dsp_reg_read(chip, eReg_CRM1);
  298. else
  299. reg = 0;
  300. goto polling_successful;
  301. } else
  302. udelay(1);
  303. }
  304. snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send_atomic! "
  305. "polling failed\n");
  306. polling_successful:
  307. if ((reg & ERROR_VALUE) == 0) {
  308. /* read response */
  309. if (rmh->stat_len) {
  310. snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));
  311. lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
  312. rmh->stat_len);
  313. }
  314. } else
  315. snd_printk(LXP "rmh error: %08x\n", reg);
  316. /* clear Reg_CSM_MR */
  317. lx_dsp_reg_write(chip, eReg_CSM, 0);
  318. switch (reg) {
  319. case ED_DSP_TIMED_OUT:
  320. snd_printk(KERN_WARNING LXP "lx_message_send: dsp timeout\n");
  321. return -ETIMEDOUT;
  322. case ED_DSP_CRASHED:
  323. snd_printk(KERN_WARNING LXP "lx_message_send: dsp crashed\n");
  324. return -EAGAIN;
  325. }
  326. lx_message_dump(rmh);
  327. return reg;
  328. }
  329. /* low-level dsp access */
  330. int __devinit lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
  331. {
  332. u16 ret;
  333. unsigned long flags;
  334. spin_lock_irqsave(&chip->msg_lock, flags);
  335. lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
  336. ret = lx_message_send_atomic(chip, &chip->rmh);
  337. *rdsp_version = chip->rmh.stat[1];
  338. spin_unlock_irqrestore(&chip->msg_lock, flags);
  339. return ret;
  340. }
  341. int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
  342. {
  343. u16 ret = 0;
  344. unsigned long flags;
  345. u32 freq_raw = 0;
  346. u32 freq = 0;
  347. u32 frequency = 0;
  348. spin_lock_irqsave(&chip->msg_lock, flags);
  349. lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
  350. ret = lx_message_send_atomic(chip, &chip->rmh);
  351. if (ret == 0) {
  352. freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET;
  353. freq = freq_raw & XES_FREQ_COUNT8_MASK;
  354. if ((freq < XES_FREQ_COUNT8_48_MAX) ||
  355. (freq > XES_FREQ_COUNT8_44_MIN))
  356. frequency = 0; /* unknown */
  357. else if (freq >= XES_FREQ_COUNT8_44_MAX)
  358. frequency = 44100;
  359. else
  360. frequency = 48000;
  361. }
  362. spin_unlock_irqrestore(&chip->msg_lock, flags);
  363. *rfreq = frequency * chip->freq_ratio;
  364. return ret;
  365. }
  366. int lx_dsp_get_mac(struct lx6464es *chip, u8 *mac_address)
  367. {
  368. u32 macmsb, maclsb;
  369. macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF;
  370. maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF;
  371. /* todo: endianess handling */
  372. mac_address[5] = ((u8 *)(&maclsb))[0];
  373. mac_address[4] = ((u8 *)(&maclsb))[1];
  374. mac_address[3] = ((u8 *)(&maclsb))[2];
  375. mac_address[2] = ((u8 *)(&macmsb))[0];
  376. mac_address[1] = ((u8 *)(&macmsb))[1];
  377. mac_address[0] = ((u8 *)(&macmsb))[2];
  378. return 0;
  379. }
  380. int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
  381. {
  382. unsigned long flags;
  383. int ret;
  384. spin_lock_irqsave(&chip->msg_lock, flags);
  385. lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
  386. chip->rmh.cmd[0] |= gran;
  387. ret = lx_message_send_atomic(chip, &chip->rmh);
  388. spin_unlock_irqrestore(&chip->msg_lock, flags);
  389. return ret;
  390. }
  391. int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
  392. {
  393. unsigned long flags;
  394. int ret;
  395. spin_lock_irqsave(&chip->msg_lock, flags);
  396. lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
  397. chip->rmh.stat_len = 9; /* we don't necessarily need the full length */
  398. ret = lx_message_send_atomic(chip, &chip->rmh);
  399. if (!ret)
  400. memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));
  401. spin_unlock_irqrestore(&chip->msg_lock, flags);
  402. return ret;
  403. }
/* Poll budget and status bits of the eReg_CSES register, used by
 * lx_dsp_es_check_pipeline() below. */
#define CSES_TIMEOUT 100 /* microseconds */
#define CSES_CE 0x0001
#define CSES_BROADCAST 0x0002
#define CSES_UPDATE_LDSV 0x0004
  408. int lx_dsp_es_check_pipeline(struct lx6464es *chip)
  409. {
  410. int i;
  411. for (i = 0; i != CSES_TIMEOUT; ++i) {
  412. /*
  413. * le bit CSES_UPDATE_LDSV est à 1 dés que le macprog
  414. * est pret. il re-passe à 0 lorsque le premier read a
  415. * été fait. pour l'instant on retire le test car ce bit
  416. * passe a 1 environ 200 à 400 ms aprés que le registre
  417. * confES à été écrit (kick du xilinx ES).
  418. *
  419. * On ne teste que le bit CE.
  420. * */
  421. u32 cses = lx_dsp_reg_read(chip, eReg_CSES);
  422. if ((cses & CSES_CE) == 0)
  423. return 0;
  424. udelay(1);
  425. }
  426. return -ETIMEDOUT;
  427. }
/* Build the pipe-id field of a command word: the pipe number, with
 * ID_IS_CAPTURE or'ed in for capture pipes, shifted up to ID_OFFSET. */
#define PIPE_INFO_TO_CMD(capture, pipe) \
	((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET)
  430. /* low-level pipe handling */
  431. int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
  432. int channels)
  433. {
  434. int err;
  435. unsigned long flags;
  436. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  437. spin_lock_irqsave(&chip->msg_lock, flags);
  438. lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);
  439. chip->rmh.cmd[0] |= pipe_cmd;
  440. chip->rmh.cmd[0] |= channels;
  441. err = lx_message_send_atomic(chip, &chip->rmh);
  442. spin_unlock_irqrestore(&chip->msg_lock, flags);
  443. if (err != 0)
  444. snd_printk(KERN_ERR "lx6464es: could not allocate pipe\n");
  445. return err;
  446. }
  447. int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
  448. {
  449. int err;
  450. unsigned long flags;
  451. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  452. spin_lock_irqsave(&chip->msg_lock, flags);
  453. lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);
  454. chip->rmh.cmd[0] |= pipe_cmd;
  455. err = lx_message_send_atomic(chip, &chip->rmh);
  456. spin_unlock_irqrestore(&chip->msg_lock, flags);
  457. return err;
  458. }
  459. int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
  460. u32 *r_needed, u32 *r_freed, u32 *size_array)
  461. {
  462. int err;
  463. unsigned long flags;
  464. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  465. #ifdef CONFIG_SND_DEBUG
  466. if (size_array)
  467. memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER);
  468. #endif
  469. *r_needed = 0;
  470. *r_freed = 0;
  471. spin_lock_irqsave(&chip->msg_lock, flags);
  472. lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);
  473. chip->rmh.cmd[0] |= pipe_cmd;
  474. err = lx_message_send_atomic(chip, &chip->rmh);
  475. if (!err) {
  476. int i;
  477. for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
  478. u32 stat = chip->rmh.stat[i];
  479. if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) {
  480. /* finished */
  481. *r_freed += 1;
  482. if (size_array)
  483. size_array[i] = stat & MASK_DATA_SIZE;
  484. } else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET))
  485. == 0)
  486. /* free */
  487. *r_needed += 1;
  488. }
  489. #if 0
  490. snd_printdd(LXP "CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
  491. *r_needed, *r_freed);
  492. for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
  493. for (i = 0; i != chip->rmh.stat_len; ++i)
  494. snd_printdd(" stat[%d]: %x, %x\n", i,
  495. chip->rmh.stat[i],
  496. chip->rmh.stat[i] & MASK_DATA_SIZE);
  497. }
  498. #endif
  499. }
  500. spin_unlock_irqrestore(&chip->msg_lock, flags);
  501. return err;
  502. }
  503. int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
  504. {
  505. int err;
  506. unsigned long flags;
  507. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  508. spin_lock_irqsave(&chip->msg_lock, flags);
  509. lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);
  510. chip->rmh.cmd[0] |= pipe_cmd;
  511. err = lx_message_send_atomic(chip, &chip->rmh);
  512. spin_unlock_irqrestore(&chip->msg_lock, flags);
  513. return err;
  514. }
  515. static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
  516. {
  517. int err;
  518. unsigned long flags;
  519. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  520. spin_lock_irqsave(&chip->msg_lock, flags);
  521. lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);
  522. chip->rmh.cmd[0] |= pipe_cmd;
  523. err = lx_message_send_atomic(chip, &chip->rmh);
  524. spin_unlock_irqrestore(&chip->msg_lock, flags);
  525. return err;
  526. }
  527. int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture)
  528. {
  529. int err;
  530. err = lx_pipe_wait_for_idle(chip, pipe, is_capture);
  531. if (err < 0)
  532. return err;
  533. err = lx_pipe_toggle_state(chip, pipe, is_capture);
  534. return err;
  535. }
  536. int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture)
  537. {
  538. int err = 0;
  539. err = lx_pipe_wait_for_start(chip, pipe, is_capture);
  540. if (err < 0)
  541. return err;
  542. err = lx_pipe_toggle_state(chip, pipe, is_capture);
  543. return err;
  544. }
/* Read the sample counter of pipe @pipe (CMD_0A_GET_PIPE_SPL_COUNT)
 * into *rsample_count, assembled from a hi word (stat[0]) and a lo word
 * (stat[1]).  Returns 0 on success.
 * NOTE(review): the hi part is shifted by 24 here, while
 * lx_stream_sample_position() shifts its hi part by 32 — presumably the
 * pipe counter carries 24-bit words; confirm against the firmware
 * documentation. */
int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
			 u64 *rsample_count)
{
	int err;
	unsigned long flags;

	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	spin_lock_irqsave(&chip->msg_lock, flags);
	lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.stat_len = 2;	/* need all words here! */

	err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */

	if (err != 0)
		snd_printk(KERN_ERR
			   "lx6464es: could not query pipe's sample count\n");
	else {
		*rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
				  << 24)	/* hi part */
			+ chip->rmh.stat[1];	/* lo part */
	}

	spin_unlock_irqrestore(&chip->msg_lock, flags);
	return err;
}
  567. int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
  568. {
  569. int err;
  570. unsigned long flags;
  571. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  572. spin_lock_irqsave(&chip->msg_lock, flags);
  573. lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);
  574. chip->rmh.cmd[0] |= pipe_cmd;
  575. err = lx_message_send_atomic(chip, &chip->rmh);
  576. if (err != 0)
  577. snd_printk(KERN_ERR "lx6464es: could not query pipe's state\n");
  578. else
  579. *rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;
  580. spin_unlock_irqrestore(&chip->msg_lock, flags);
  581. return err;
  582. }
  583. static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe,
  584. int is_capture, u16 state)
  585. {
  586. int i;
  587. /* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms:
  588. * timeout 50 ms */
  589. for (i = 0; i != 50; ++i) {
  590. u16 current_state;
  591. int err = lx_pipe_state(chip, pipe, is_capture, &current_state);
  592. if (err < 0)
  593. return err;
  594. if (current_state == state)
  595. return 0;
  596. mdelay(1);
  597. }
  598. return -ETIMEDOUT;
  599. }
/* Wait (up to ~50 ms) for pipe @pipe to reach the running state. */
int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
	return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN);
}
/* Wait (up to ~50 ms) for pipe @pipe to reach the idle state. */
int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture)
{
	return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE);
}
  608. /* low-level stream handling */
  609. int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
  610. int is_capture, enum stream_state_t state)
  611. {
  612. int err;
  613. unsigned long flags;
  614. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  615. spin_lock_irqsave(&chip->msg_lock, flags);
  616. lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);
  617. chip->rmh.cmd[0] |= pipe_cmd;
  618. chip->rmh.cmd[0] |= state;
  619. err = lx_message_send_atomic(chip, &chip->rmh);
  620. spin_unlock_irqrestore(&chip->msg_lock, flags);
  621. return err;
  622. }
  623. int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
  624. u32 pipe, int is_capture)
  625. {
  626. int err;
  627. unsigned long flags;
  628. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  629. u32 channels = runtime->channels;
  630. if (runtime->channels != channels)
  631. snd_printk(KERN_ERR LXP "channel count mismatch: %d vs %d",
  632. runtime->channels, channels);
  633. spin_lock_irqsave(&chip->msg_lock, flags);
  634. lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);
  635. chip->rmh.cmd[0] |= pipe_cmd;
  636. if (runtime->sample_bits == 16)
  637. /* 16 bit format */
  638. chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET);
  639. if (snd_pcm_format_little_endian(runtime->format))
  640. /* little endian/intel format */
  641. chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET);
  642. chip->rmh.cmd[0] |= channels-1;
  643. err = lx_message_send_atomic(chip, &chip->rmh);
  644. spin_unlock_irqrestore(&chip->msg_lock, flags);
  645. return err;
  646. }
  647. int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
  648. int *rstate)
  649. {
  650. int err;
  651. unsigned long flags;
  652. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  653. spin_lock_irqsave(&chip->msg_lock, flags);
  654. lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);
  655. chip->rmh.cmd[0] |= pipe_cmd;
  656. err = lx_message_send_atomic(chip, &chip->rmh);
  657. *rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;
  658. spin_unlock_irqrestore(&chip->msg_lock, flags);
  659. return err;
  660. }
  661. int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
  662. u64 *r_bytepos)
  663. {
  664. int err;
  665. unsigned long flags;
  666. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  667. spin_lock_irqsave(&chip->msg_lock, flags);
  668. lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);
  669. chip->rmh.cmd[0] |= pipe_cmd;
  670. err = lx_message_send_atomic(chip, &chip->rmh);
  671. *r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
  672. << 32) /* hi part */
  673. + chip->rmh.stat[1]; /* lo part */
  674. spin_unlock_irqrestore(&chip->msg_lock, flags);
  675. return err;
  676. }
  677. /* low-level buffer handling */
/* Hand one audio buffer to the DSP (CMD_0F_UPDATE_BUFFER) for pipe
 * @pipe: cmd[1] carries the size, cmd[2]/cmd[3] the 32/64-bit DMA
 * address, and BF_NOTIFY_EOB requests an end-of-buffer interrupt.  On
 * success the buffer's slot index is stored in *r_buffer_index.
 * Returns 0 on success, otherwise the DSP error code (note: the EB_*
 * codes are positive values compared against the raw return of
 * lx_message_send_atomic()). */
int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
		   u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
		   u32 *r_buffer_index)
{
	int err;
	unsigned long flags;

	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	spin_lock_irqsave(&chip->msg_lock, flags);
	lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */

	/* todo: pause request, circular buffer */

	chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE;
	chip->rmh.cmd[2] = buf_address_lo;

	if (buf_address_hi) {
		/* 64-bit DMA address: grow the command by one word */
		chip->rmh.cmd_len = 4;
		chip->rmh.cmd[3] = buf_address_hi;
		chip->rmh.cmd[0] |= BF_64BITS_ADR;
	}

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (err == 0) {
		*r_buffer_index = chip->rmh.stat[0];
		goto done;
	}

	if (err == EB_RBUFFERS_TABLE_OVERFLOW)
		snd_printk(LXP "lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n");

	if (err == EB_INVALID_STREAM)
		snd_printk(LXP "lx_buffer_give EB_INVALID_STREAM\n");

	if (err == EB_CMD_REFUSED)
		snd_printk(LXP "lx_buffer_give EB_CMD_REFUSED\n");

done:
	spin_unlock_irqrestore(&chip->msg_lock, flags);
	return err;
}
  712. int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
  713. u32 *r_buffer_size)
  714. {
  715. int err;
  716. unsigned long flags;
  717. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  718. spin_lock_irqsave(&chip->msg_lock, flags);
  719. lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);
  720. chip->rmh.cmd[0] |= pipe_cmd;
  721. chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the
  722. * microblaze will seek for it */
  723. err = lx_message_send_atomic(chip, &chip->rmh);
  724. if (err == 0)
  725. *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;
  726. spin_unlock_irqrestore(&chip->msg_lock, flags);
  727. return err;
  728. }
  729. int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
  730. u32 buffer_index)
  731. {
  732. int err;
  733. unsigned long flags;
  734. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  735. spin_lock_irqsave(&chip->msg_lock, flags);
  736. lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);
  737. chip->rmh.cmd[0] |= pipe_cmd;
  738. chip->rmh.cmd[0] |= buffer_index;
  739. err = lx_message_send_atomic(chip, &chip->rmh);
  740. spin_unlock_irqrestore(&chip->msg_lock, flags);
  741. return err;
  742. }
  743. /* low-level gain/peak handling
  744. *
  745. * \todo: can we unmute capture/playback channels independently?
  746. *
  747. * */
  748. int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
  749. {
  750. int err;
  751. unsigned long flags;
  752. /* bit set to 1: channel muted */
  753. u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;
  754. spin_lock_irqsave(&chip->msg_lock, flags);
  755. lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);
  756. chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);
  757. chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32); /* hi part */
  758. chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */
  759. snd_printk("mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1],
  760. chip->rmh.cmd[2]);
  761. err = lx_message_send_atomic(chip, &chip->rmh);
  762. spin_unlock_irqrestore(&chip->msg_lock, flags);
  763. return err;
  764. }
  765. static u32 peak_map[] = {
  766. 0x00000109, /* -90.308dB */
  767. 0x0000083B, /* -72.247dB */
  768. 0x000020C4, /* -60.205dB */
  769. 0x00008273, /* -48.030dB */
  770. 0x00020756, /* -36.005dB */
  771. 0x00040C37, /* -30.001dB */
  772. 0x00081385, /* -24.002dB */
  773. 0x00101D3F, /* -18.000dB */
  774. 0x0016C310, /* -15.000dB */
  775. 0x002026F2, /* -12.001dB */
  776. 0x002D6A86, /* -9.000dB */
  777. 0x004026E6, /* -6.004dB */
  778. 0x005A9DF6, /* -3.000dB */
  779. 0x0065AC8B, /* -2.000dB */
  780. 0x00721481, /* -1.000dB */
  781. 0x007FFFFF, /* FS */
  782. };
  783. int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
  784. u32 *r_levels)
  785. {
  786. int err = 0;
  787. unsigned long flags;
  788. int i;
  789. spin_lock_irqsave(&chip->msg_lock, flags);
  790. for (i = 0; i < channels; i += 4) {
  791. u32 s0, s1, s2, s3;
  792. lx_message_init(&chip->rmh, CMD_12_GET_PEAK);
  793. chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i);
  794. err = lx_message_send_atomic(chip, &chip->rmh);
  795. if (err == 0) {
  796. s0 = peak_map[chip->rmh.stat[0] & 0x0F];
  797. s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf];
  798. s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf];
  799. s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf];
  800. } else
  801. s0 = s1 = s2 = s3 = 0;
  802. r_levels[0] = s0;
  803. r_levels[1] = s1;
  804. r_levels[2] = s2;
  805. r_levels[3] = s3;
  806. r_levels += 4;
  807. }
  808. spin_unlock_irqrestore(&chip->msg_lock, flags);
  809. return err;
  810. }
  811. /* interrupt handling */
  812. #define PCX_IRQ_NONE 0
#define IRQCS_ACTIVE_PCIDB	0x00002000L /* bit 13 */
#define IRQCS_ENABLE_PCIIRQ	0x00000100L /* bit 08 */
#define IRQCS_ENABLE_PCIDB	0x00000200L /* bit 09 */
  816. static u32 lx_interrupt_test_ack(struct lx6464es *chip)
  817. {
  818. u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS);
  819. /* Test if PCI Doorbell interrupt is active */
  820. if (irqcs & IRQCS_ACTIVE_PCIDB) {
  821. u32 temp;
  822. irqcs = PCX_IRQ_NONE;
  823. while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) {
  824. /* RAZ interrupt */
  825. irqcs |= temp;
  826. lx_plx_reg_write(chip, ePLX_L2PCIDB, temp);
  827. }
  828. return irqcs;
  829. }
  830. return PCX_IRQ_NONE;
  831. }
  832. static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
  833. int *r_async_pending, int *r_async_escmd)
  834. {
  835. u32 irq_async;
  836. u32 irqsrc = lx_interrupt_test_ack(chip);
  837. if (irqsrc == PCX_IRQ_NONE)
  838. return 0;
  839. *r_irqsrc = irqsrc;
  840. irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response
  841. * (set by xilinx) + EOB */
  842. if (irq_async & MASK_SYS_STATUS_ESA) {
  843. irq_async &= ~MASK_SYS_STATUS_ESA;
  844. *r_async_escmd = 1;
  845. }
  846. if (irq_async) {
  847. /* snd_printd("interrupt: async event pending\n"); */
  848. *r_async_pending = 1;
  849. }
  850. return 1;
  851. }
/* Read and decode the asynchronous events reported by the DSP:
 * end-of-buffer notifications per pipe, frequency change, and (not yet
 * handled) xrun masks.
 *
 * @irqsrc:			doorbell bits from lx_interrupt_ack()
 * @r_freq_changed:		set to 1 when the firmware flags a
 *				frequency change
 * @r_notified_in_pipe_mask:	64-bit mask of capture pipes with a
 *				finished buffer (written only on EOBI)
 * @r_notified_out_pipe_mask:	64-bit mask of playback pipes with a
 *				finished buffer (written only on EOBO)
 *
 * Returns the result of lx_dsp_read_async_events(): negative on
 * failure, otherwise passed through to the caller.
 */
static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc,
					    int *r_freq_changed,
					    u64 *r_notified_in_pipe_mask,
					    u64 *r_notified_out_pipe_mask)
{
	int err;
	u32 stat[9];	/* answer from CMD_04_GET_EVENT */

	/* We could optimize by not reading the empty events.
	 * The answer words come in the following order:
	 *	stat[0]	general status word
	 *	stat[1]	end of buffer OUT, high word
	 *	stat[2]	end of buffer OUT, low word
	 *	stat[3]	end of buffer IN, high word
	 *	stat[4]	end of buffer IN, low word
	 *	stat[5]	underrun, high word
	 *	stat[6]	underrun, low word
	 *	stat[7]	overrun, high word
	 *	stat[8]	overrun, low word
	 */
	u64 orun_mask;
	u64 urun_mask;
#if 0
	int has_underrun = (irqsrc & MASK_SYS_STATUS_URUN) ? 1 : 0;
	int has_overrun = (irqsrc & MASK_SYS_STATUS_ORUN) ? 1 : 0;
#endif
	int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0;
	int eb_pending_in = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0;

	*r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0;

	err = lx_dsp_read_async_events(chip, stat);
	if (err < 0)
		return err;

	if (eb_pending_in) {
		*r_notified_in_pipe_mask = ((u64)stat[3] << 32)
			+ stat[4];
		snd_printdd(LXP "interrupt: EOBI pending %llx\n",
			    *r_notified_in_pipe_mask);
	}
	if (eb_pending_out) {
		*r_notified_out_pipe_mask = ((u64)stat[1] << 32)
			+ stat[2];
		snd_printdd(LXP "interrupt: EOBO pending %llx\n",
			    *r_notified_out_pipe_mask);
	}

	/* assembled but currently unused, see todo below */
	orun_mask = ((u64)stat[7] << 32) + stat[8];
	urun_mask = ((u64)stat[5] << 32) + stat[6];

	/* todo: handle xrun notification */

	return err;
}
  900. static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
  901. struct lx_stream *lx_stream)
  902. {
  903. struct snd_pcm_substream *substream = lx_stream->stream;
  904. const unsigned int is_capture = lx_stream->is_capture;
  905. int err;
  906. unsigned long flags;
  907. const u32 channels = substream->runtime->channels;
  908. const u32 bytes_per_frame = channels * 3;
  909. const u32 period_size = substream->runtime->period_size;
  910. const u32 period_bytes = period_size * bytes_per_frame;
  911. const u32 pos = lx_stream->frame_pos;
  912. const u32 next_pos = ((pos+1) == substream->runtime->periods) ?
  913. 0 : pos + 1;
  914. dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes;
  915. u32 buf_hi = 0;
  916. u32 buf_lo = 0;
  917. u32 buffer_index = 0;
  918. u32 needed, freed;
  919. u32 size_array[MAX_STREAM_BUFFER];
  920. snd_printdd("->lx_interrupt_request_new_buffer\n");
  921. spin_lock_irqsave(&chip->lock, flags);
  922. err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
  923. snd_printdd(LXP "interrupt: needed %d, freed %d\n", needed, freed);
  924. unpack_pointer(buf, &buf_lo, &buf_hi);
  925. err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi,
  926. &buffer_index);
  927. snd_printdd(LXP "interrupt: gave buffer index %x on %p (%d bytes)\n",
  928. buffer_index, (void *)buf, period_bytes);
  929. lx_stream->frame_pos = next_pos;
  930. spin_unlock_irqrestore(&chip->lock, flags);
  931. return err;
  932. }
  933. void lx_tasklet_playback(unsigned long data)
  934. {
  935. struct lx6464es *chip = (struct lx6464es *)data;
  936. struct lx_stream *lx_stream = &chip->playback_stream;
  937. int err;
  938. snd_printdd("->lx_tasklet_playback\n");
  939. err = lx_interrupt_request_new_buffer(chip, lx_stream);
  940. if (err < 0)
  941. snd_printk(KERN_ERR LXP
  942. "cannot request new buffer for playback\n");
  943. snd_pcm_period_elapsed(lx_stream->stream);
  944. }
  945. void lx_tasklet_capture(unsigned long data)
  946. {
  947. struct lx6464es *chip = (struct lx6464es *)data;
  948. struct lx_stream *lx_stream = &chip->capture_stream;
  949. int err;
  950. snd_printdd("->lx_tasklet_capture\n");
  951. err = lx_interrupt_request_new_buffer(chip, lx_stream);
  952. if (err < 0)
  953. snd_printk(KERN_ERR LXP
  954. "cannot request new buffer for capture\n");
  955. snd_pcm_period_elapsed(lx_stream->stream);
  956. }
  957. static int lx_interrupt_handle_audio_transfer(struct lx6464es *chip,
  958. u64 notified_in_pipe_mask,
  959. u64 notified_out_pipe_mask)
  960. {
  961. int err = 0;
  962. if (notified_in_pipe_mask) {
  963. snd_printdd(LXP "requesting audio transfer for capture\n");
  964. tasklet_hi_schedule(&chip->tasklet_capture);
  965. }
  966. if (notified_out_pipe_mask) {
  967. snd_printdd(LXP "requesting audio transfer for playback\n");
  968. tasklet_hi_schedule(&chip->tasklet_playback);
  969. }
  970. return err;
  971. }
/* Top-level PCI interrupt handler.
 *
 * Acknowledges and classifies the doorbell bits; on async events,
 * reads them from the DSP and schedules the audio-transfer tasklets.
 * Returns IRQ_NONE when the doorbell shows this device did not raise
 * the interrupt, IRQ_HANDLED otherwise.
 */
irqreturn_t lx_interrupt(int irq, void *dev_id)
{
	struct lx6464es *chip = dev_id;
	int async_pending, async_escmd;
	u32 irqsrc;

	spin_lock(&chip->lock);

	snd_printdd("**************************************************\n");

	if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
		spin_unlock(&chip->lock);
		snd_printdd("IRQ_NONE\n");
		return IRQ_NONE; /* this device did not cause the interrupt */
	}

	/* command-completion doorbells need no further processing here */
	if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
		goto exit;

#if 0
	if (irqsrc & MASK_SYS_STATUS_EOBI)
		snd_printdd(LXP "interrupt: EOBI\n");
	if (irqsrc & MASK_SYS_STATUS_EOBO)
		snd_printdd(LXP "interrupt: EOBO\n");
	if (irqsrc & MASK_SYS_STATUS_URUN)
		snd_printdd(LXP "interrupt: URUN\n");
	if (irqsrc & MASK_SYS_STATUS_ORUN)
		snd_printdd(LXP "interrupt: ORUN\n");
#endif

	if (async_pending) {
		u64 notified_in_pipe_mask = 0;
		u64 notified_out_pipe_mask = 0;
		int freq_changed;
		int err;

		/* handle async events */
		err = lx_interrupt_handle_async_events(chip, irqsrc,
						       &freq_changed,
						       &notified_in_pipe_mask,
						       &notified_out_pipe_mask);
		if (err)
			snd_printk(KERN_ERR LXP
				   "error handling async events\n");

		err = lx_interrupt_handle_audio_transfer(chip,
							 notified_in_pipe_mask,
							 notified_out_pipe_mask
							 );
		if (err)
			snd_printk(KERN_ERR LXP
				   "error during audio transfer\n");
	}

	if (async_escmd) {
#if 0
		/* backdoor for ethersound commands
		 *
		 * for now, we do not need this
		 *
		 * */
		snd_printdd("lx6464es: interrupt requests escmd handling\n");
#endif
	}

exit:
	spin_unlock(&chip->lock);
	return IRQ_HANDLED; /* this device caused the interrupt */
}
  1031. static void lx_irq_set(struct lx6464es *chip, int enable)
  1032. {
  1033. u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS);
  1034. /* enable/disable interrupts
  1035. *
  1036. * Set the Doorbell and PCI interrupt enable bits
  1037. *
  1038. * */
  1039. if (enable)
  1040. reg |= (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
  1041. else
  1042. reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
  1043. lx_plx_reg_write(chip, ePLX_IRQCS, reg);
  1044. }
  1045. void lx_irq_enable(struct lx6464es *chip)
  1046. {
  1047. snd_printdd("->lx_irq_enable\n");
  1048. lx_irq_set(chip, 1);
  1049. }
  1050. void lx_irq_disable(struct lx6464es *chip)
  1051. {
  1052. snd_printdd("->lx_irq_disable\n");
  1053. lx_irq_set(chip, 0);
  1054. }