/*
 * DMA driver for NVIDIA's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define TEGRA_APBDMA_GENERAL 0x0
#define TEGRA_APBDMA_GENERAL_ENABLE BIT(31)

#define TEGRA_APBDMA_CONTROL 0x010
#define TEGRA_APBDMA_IRQ_MASK 0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET 0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR 0x00
#define TEGRA_APBDMA_CSR_ENB BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC BIT(30)
#define TEGRA_APBDMA_CSR_HOLD BIT(29)
#define TEGRA_APBDMA_CSR_DIR BIT(28)
#define TEGRA_APBDMA_CSR_ONCE BIT(27)
#define TEGRA_APBDMA_CSR_FLOW BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16
#define TEGRA_APBDMA_CSR_REQ_SEL_MASK 0x1F
#define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS 0x004
#define TEGRA_APBDMA_STATUS_BUSY BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC BIT(30)
#define TEGRA_APBDMA_STATUS_HALT BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2
#define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE 0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE BIT(31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR 0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ 0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8 (0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16 (1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32 (2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64 (3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128 (4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1 (4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4 (5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8 (6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT 16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE 0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR 0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ 0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8 (0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16 (1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32 (2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64 (3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128 (4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT 0x20
#define TEGRA_APBDMA_CHAN_WORD_TRANSFER 0x24

/*
 * If a burst is in flight when the DMA is paused, this is the time (in
 * microseconds) needed to complete the in-flight burst and update the
 * DMA status register.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME 20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000

#define TEGRA_APBDMA_SLAVE_ID_INVALID (TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)

struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @support_channel_pause: Support channel-wise pause of DMA.
 * @support_separate_wcount_reg: Support separate word count register.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int channel_reg_size;
	int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long csr;
	unsigned long ahb_ptr;
	unsigned long apb_ptr;
	unsigned long ahb_seq;
	unsigned long apb_seq;
	unsigned long wcount;
};

/*
 * tegra_dma_sg_req: DMA request details used to configure the hardware.
 * This structure holds the details of one sub-transfer. A client's data
 * transfer request can be broken into multiple sub-transfers, as per the
 * requester details and hardware support. Each sub-transfer is added to
 * the list of transfers and points to the Tegra DMA descriptor that
 * manages the overall transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs ch_regs;
	int req_len;
	bool configured;
	bool last_sg;
	struct list_head node;
	struct tegra_dma_desc *dma_desc;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests.
 * This descriptor keeps track of transfer status, callbacks, request
 * counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor txd;
	int bytes_requested;
	int bytes_transferred;
	enum dma_status dma_status;
	struct list_head node;
	struct list_head tx_list;
	struct list_head cb_node;
	int cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan dma_chan;
	char name[30];
	bool config_init;
	int id;
	int irq;
	void __iomem *chan_addr;
	spinlock_t lock;
	bool busy;
	struct tegra_dma *tdma;
	bool cyclic;

	/* Different lists for managing the requests */
	struct list_head free_sg_req;
	struct list_head pending_sg_req;
	struct list_head free_dma_desc;
	struct list_head cb_desc;

	/* ISR handler and tasklet for bottom half of ISR handling */
	dma_isr_handler isr_handler;
	struct tasklet_struct tasklet;

	/* Channel-slave specific configuration */
	unsigned int slave_id;
	struct dma_slave_config dma_sconfig;
	struct tegra_dma_channel_regs channel_reg;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device dma_dev;
	struct device *dev;
	struct clk *dma_clk;
	struct reset_control *rst;
	spinlock_t global_lock;
	void __iomem *base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32 global_pause_count;

	/* Some registers need to be cached before suspend */
	u32 reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);

/* Get a DMA descriptor from the free list; if none is free, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not reuse a descriptor that is still waiting for its ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					  typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT);

	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID) {
		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
			return -EINVAL;
		tdc->slave_id = sconfig->slave_id;
	}
	tdc->config_init = true;
	return 0;
}
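
/*
 * Illustrative usage (not part of this driver): a slave client such as
 * an I2S or serial driver reaches this callback through the generic
 * dmaengine API. The names below are hypothetical:
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 4,
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * Note that the slave (requester) id normally comes from the DT phandle
 * via tegra_dma_of_xlate() below; sconfig->slave_id is only used as a
 * fallback when the channel has no id yet.
 */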

static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
			  TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	else
		tegra_dma_global_resume(tdc);
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no ISE_EOC status, the last burst has not completed
	 * yet. The last burst may also be in flight: it can then complete,
	 * but because the DMA is paused it neither raises the interrupt
	 * nor reloads the new configuration.
	 * If the ISE_EOC status is already set, the interrupt handler
	 * needs to load the new configuration instead.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending, do nothing; the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
			  nsg_req->ch_regs.wcount);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
				  typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					   typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
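
/*
 * Worked example, assuming the STATUS count field holds the remaining
 * transfer encoded like the CSR word count, i.e. (words - 1) << 2: for
 * a 256-byte request with 16 words (64 bytes) still outstanding, the
 * field reads (16 - 1) << 2 = 0x3C, so this returns
 * 256 - 0x3C - 4 = 192 bytes already transferred.
 */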

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					 typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add to the cb list if it is not already there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
					      &tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Dma is running without req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head request on the list is in flight.
	 * If it is not in flight, abort the transfer, as the looping
	 * of transfers cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is about to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;

	/* If we DMA for long enough, the transfer count will wrap */
	dma_desc->bytes_transferred =
		(dma_desc->bytes_transferred + sgreq->req_len) %
		dma_desc->bytes_requested;

	/* The callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not the last request, put it at the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}

static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	struct dmaengine_desc_callback cb;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					    typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		 "Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: configure the next request */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time before configuring
			 * the DMA for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	unsigned long wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					 typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					    typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
	return 0;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	dma_desc = NULL;

found:
	if (dma_desc && txstate) {
		residual = dma_desc->bytes_requested -
			   (dma_desc->bytes_transferred %
			    dma_desc->bytes_requested);
		dma_set_residue(txstate, residual);
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}
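
/*
 * Residue example: for a one-shot transfer with bytes_requested = 4096
 * and bytes_transferred = 1024, the residue is
 * 4096 - (1024 % 4096) = 3072. For cyclic transfers,
 * handle_cont_sngl_cycle_dma_done() keeps bytes_transferred wrapped at
 * bytes_requested, so the same formula reports the position within the
 * current cycle.
 */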

static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			 "slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in terms of the bus width.
	 * Convert it into the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}
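
/*
 * Worked examples: a client burst of 8 words at a 4-byte bus width is
 * 32 bytes, i.e. 8 AHB words, selecting TEGRA_APBDMA_AHBSEQ_BURST_8.
 * With no burst size configured, a 144-byte (0x90) transfer is 16-byte
 * aligned but has bit 4 set, so TEGRA_APBDMA_AHBSEQ_BURST_4 is chosen:
 * bursts of 4 words (16 bytes) divide the length evenly, while bursts
 * of 8 words (32 bytes) would not.
 */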

static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;
	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;
	default:
		dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
		return -EINVAL;
	}
	return -EINVAL;
}

static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
	struct tegra_dma_channel_regs *ch_regs, u32 len)
{
	u32 len_field = (len - 4) & 0xFFFC;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		ch_regs->wcount = len_field;
	else
		ch_regs->csr |= len_field;
}
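
/*
 * Encoding example: for a word-aligned length, (len - 4) & 0xFFFC
 * equals (words - 1) << 2. A 4096-byte (1024-word) transfer is thus
 * programmed as (4096 - 4) & 0xFFFC = 0xFFC = (1024 - 1) << 2, either
 * into the separate WCOUNT register or merged into the CSR word-count
 * field (TEGRA_APBDMA_CSR_WCOUNT_MASK).
 */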

static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
		    (len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"Dma length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
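
/*
 * Illustrative client usage (hypothetical, via the generic dmaengine
 * API): after dmaengine_slave_config(), a client maps its buffer and
 * submits a scatter-gather transfer:
 *
 *	nents = dma_map_sg(dev, sgl, sg_len, DMA_TO_DEVICE);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * Each scatterlist entry becomes one tegra_dma_sg_req above; all lengths
 * and addresses must be word aligned, or preparation fails.
 */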

static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * We can accept more requests as long as the DMA has not been
	 * started; the driver will loop over all of them. Once the DMA
	 * is started, new requests can be queued only after terminating
	 * the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
	    (len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
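
/*
 * Illustrative cyclic usage (hypothetical): an audio driver would set up
 * a ring buffer with one interrupt per period:
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *					period_len, DMA_MEM_TO_DEV,
 *					DMA_PREP_INTERRUPT);
 *
 * buf_len must be a multiple of period_len; the buffer is split into one
 * sg_req per period, and the driver loops over them until the channel is
 * terminated.
 */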

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;

	ret = pm_runtime_get_sync(tdma->dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					    typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	pm_runtime_put(tdma->dev);

	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}

static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct tegra_dma_channel *tdc;

	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
		dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}
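
/*
 * Device tree example (illustrative; the node name and address are made
 * up): the single DMA cell is the APB requester (slave) id validated
 * above:
 *
 *	serial@70006000 {
 *		...
 *		dmas = <&apbdma 8>, <&apbdma 8>;
 *		dma-names = "rx", "tx";
 *	};
 *
 * The id is stored in tdc->slave_id and later shifted into the CSR
 * REQ_SEL field when a transfer is prepared.
 */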

/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels = 16,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x40,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = true,
	.support_separate_wcount_reg = true,
};
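
/*
 * channel_reg_size above is the per-channel register stride; the probe
 * code below places channel i at:
 *
 *	chan_addr = base_addr + TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
 *		    i * channel_reg_size;
 *
 * so, for example, Tegra148 channel 1 sits at base + 0x1000 + 0x40.
 */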

static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata;

	cdata = of_device_get_match_data(&pdev->dev);
	if (!cdata) {
		dev_err(&pdev->dev, "Error: No device match data found\n");
		return -ENODEV;
	}

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			    sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
	if (IS_ERR(tdma->rst)) {
		dev_err(&pdev->dev, "Error: Missing reset\n");
		return PTR_ERR(tdma->rst);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev))
		ret = tegra_dma_runtime_resume(&pdev->dev);
	else
		ret = pm_runtime_get_sync(&pdev->dev);

	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Reset DMA controller */
	reset_control_assert(tdma->rst);
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	pm_runtime_put(&pdev->dev);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_addr = tdma->base_addr +
				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
				 (i * cdata->channel_reg_size);

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
			      &tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
			     (unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->global_pause_count = 0;
	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	/*
	 * XXX The hardware appears to support
	 * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
	 * only used by this driver during tegra_dma_terminate_all()
	 */
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA OF registration failed %d\n", ret);
		goto err_unregister_dma_dev;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver register %d channels\n",
		 cdata->nr_channels);
	return 0;

err_unregister_dma_dev:
	dma_async_device_unregister(&tdma->dma_dev);
err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		free_irq(tdc->irq, tdc);
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		free_irq(tdc->irq, tdc);
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;

	tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		/* Only save the state of DMA channels that are in use */
		if (!tdc->config_init)
			continue;

		ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
		ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
		ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
		ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
		ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
		if (tdma->chip_data->support_separate_wcount_reg)
			ch_reg->wcount = tdc_read(tdc,
						  TEGRA_APBDMA_CHAN_WCOUNT);
	}

	clk_disable_unprepare(tdma->dma_clk);

	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i, ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		/* Only restore the state of DMA channels that are in use */
		if (!tdc->config_init)
			continue;

		if (tdma->chip_data->support_separate_wcount_reg)
			tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
				  ch_reg->wcount);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
			  (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
	}

	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra148-apbdma",
		.data = &tegra148_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name = "tegra-apbdma",
		.pm = &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe = tegra_dma_probe,
	.remove = tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");