omap_ssi_port.c

/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ssi_debug_port_show, inode->i_private);
}

static const struct file_operations ssi_port_regs_fops = {
	.open		= ssi_port_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
				struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
#endif

static void ssi_process_errqueue(struct work_struct *work)
{
	struct omap_ssi_port *omap_port;
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	omap_port = container_of(work, struct omap_ssi_port,
				 errqueue_work.work);

	list_for_each_safe(head, tmp, &omap_port->errqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		msg->complete(msg);
		list_del(head);
	}
}

static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}

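/*
 * Program a GDD logical channel for a single-entry scatterlist transfer.
 * The message direction selects the source/destination ports and
 * addressing modes; the transfer itself is kicked off by the final
 * write to the channel control register, after the block and timeout
 * interrupts have been enabled.
 */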
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	/* Hold clocks during the transfer */
	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return err;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return err;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

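/*
 * PIO fallback: arm the DATAACCEPT/DATAAVAILABLE interrupt for the
 * channel and let ssi_pio_thread() move the data one word at a time.
 * Writes take an extra runtime PM reference here, which is dropped in
 * ssi_pio_complete() once the last frame has really been sent.
 */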
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_autosuspend(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

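/*
 * Start the first queued message: transfers longer than one word go
 * through the GDD DMA engine when a logical channel is free; anything
 * else (or a fully booked GDD) falls back to PIO.
 */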
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

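/*
 * Client entry point for transfers: break frames are handled
 * synchronously, everything else is queued per channel and started
 * right away if nothing is already in flight on that queue.
 */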
static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}

static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

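/*
 * Apply a client configuration: both directions are put to sleep while
 * framesize, divisor, channel count and arbitration mode are updated,
 * and the new values are mirrored into the shadow context used to
 * restore the port after OFF mode.
 */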
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_autosuspend(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_autosuspend(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

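/*
 * The outgoing wake line is reference counted: the first ssi_start_tx()
 * schedules start_tx_work() to grab the clocks and raise the line, and
 * the last ssi_stop_tx() lowers it again and releases the clocks.
 */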
static void start_tx_work(struct work_struct *work)
{
	struct omap_ssi_port *omap_port =
				container_of(work, struct omap_ssi_port, work);
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	spin_unlock_bh(&omap_port->wk_lock);

	schedule_work(&omap_port->work);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

	return 0;
}

static void ssi_transfer(struct omap_ssi_port *omap_port,
			 struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	pm_runtime_get(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
}

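/*
 * Drop every message a departing client still has queued, undo the
 * clock references held for its in-flight writes, and disarm/ack the
 * interrupts that were watching those channels.
 */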
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clocks writes, also GDD ones */
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
			SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}

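/*
 * Move one 32-bit word per interrupt between the message buffer and the
 * channel FIFO. A completed write keeps its interrupt armed one more
 * round so the complete callback only runs after the last frame has
 * actually been sent.
 */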
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock_bh(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock_bh(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_mark_last_busy(omap_port->pdev);
		pm_runtime_put_autosuspend(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock_bh(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);

	do {
		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		for (ch = 0; ch < omap_port->channels; ch++) {
			if (status_reg & SSI_DATAACCEPT(ch))
				ssi_pio_complete(port, &omap_port->txqueue[ch]);
			if (status_reg & SSI_DATAAVAILABLE(ch))
				ssi_pio_complete(port, &omap_port->rxqueue[ch]);
		}
		if (status_reg & SSI_BREAKDETECTED)
			ssi_break_complete(port);
		if (status_reg & SSI_ERROROCCURED)
			ssi_error(port);

		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		/* TODO: sleep if we retry? */
	} while (status_reg);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return IRQ_HANDLED;
}

static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick High-Low-High transition in the line.
		 * In such a case if we have long interrupt latencies,
		 * we can miss the low event or get twice a high event.
		 * This workaround will avoid breaking the clock reference
		 * count when such a situation occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
	}

	return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0) {
		dev_err(&port->device, "Port IRQ resource missing\n");
		return err;
	}
	omap_port->irq = err;
	err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
				ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
	omap_port->wake_irq = cawake_irq;

	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
		ssi_wake_thread,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
		"SSI cawake", port);
	if (err < 0)
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

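/*
 * Map one of the port's "tx"/"rx" register windows and hand back both
 * the virtual base and the physical start address; the latter doubles
 * as the peripheral port address for GDD transfers.
 */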
static int ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}

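/*
 * One platform device is probed per port: pick the first port slot the
 * controller has not handed out yet, map the "tx"/"rx" windows, wire up
 * the PIO and CAWAKE interrupts and register the HSI callbacks.
 */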
static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
	INIT_WORK(&omap_port->work, start_tx_work);

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_use_autosuspend(omap_port->pdev);
	pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}

static int ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	cancel_delayed_work_sync(&omap_port->errqueue_work);

	hsi_port_unregister_clients(port);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);

	pm_runtime_dont_use_autosuspend(&pd->dev);
	pm_runtime_disable(&pd->dev);

	return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);

	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}

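/*
 * Runtime PM: the MPU interrupt enable mask and the SST/SSR
 * configuration are saved on suspend and restored on resume, unless the
 * context-loss count shows the domain never lost power, in which case
 * only the mode and TX divisor need to be rewritten.
 */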
#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS	(&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
	.probe = ssi_port_probe,
	.remove	= ssi_port_remove,
	.driver	= {
		.name	= "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};