ep93xx_dma.c

  1. /*
  2. * Driver for the Cirrus Logic EP93xx DMA Controller
  3. *
  4. * Copyright (C) 2011 Mika Westerberg
  5. *
  6. * DMA M2P implementation is based on the original
  7. * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
  8. *
  9. * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
  10. * Copyright (C) 2006 Applied Data Systems
  11. * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
  12. *
  13. * This driver is based on dw_dmac and amba-pl08x drivers.
  14. *
  15. * This program is free software; you can redistribute it and/or modify
  16. * it under the terms of the GNU General Public License as published by
  17. * the Free Software Foundation; either version 2 of the License, or
  18. * (at your option) any later version.
  19. */
  20. #include <linux/clk.h>
  21. #include <linux/init.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/dmaengine.h>
  24. #include <linux/module.h>
  25. #include <linux/platform_device.h>
  26. #include <linux/slab.h>
  27. #include <linux/platform_data/dma-ep93xx.h>
  28. #include "dmaengine.h"
  29. /* M2P registers */
  30. #define M2P_CONTROL 0x0000
  31. #define M2P_CONTROL_STALLINT BIT(0)
  32. #define M2P_CONTROL_NFBINT BIT(1)
  33. #define M2P_CONTROL_CH_ERROR_INT BIT(3)
  34. #define M2P_CONTROL_ENABLE BIT(4)
  35. #define M2P_CONTROL_ICE BIT(6)
  36. #define M2P_INTERRUPT 0x0004
  37. #define M2P_INTERRUPT_STALL BIT(0)
  38. #define M2P_INTERRUPT_NFB BIT(1)
  39. #define M2P_INTERRUPT_ERROR BIT(3)
  40. #define M2P_PPALLOC 0x0008
  41. #define M2P_STATUS 0x000c
  42. #define M2P_MAXCNT0 0x0020
  43. #define M2P_BASE0 0x0024
  44. #define M2P_MAXCNT1 0x0030
  45. #define M2P_BASE1 0x0034
  46. #define M2P_STATE_IDLE 0
  47. #define M2P_STATE_STALL 1
  48. #define M2P_STATE_ON 2
  49. #define M2P_STATE_NEXT 3
  50. /* M2M registers */
  51. #define M2M_CONTROL 0x0000
  52. #define M2M_CONTROL_DONEINT BIT(2)
  53. #define M2M_CONTROL_ENABLE BIT(3)
  54. #define M2M_CONTROL_START BIT(4)
  55. #define M2M_CONTROL_DAH BIT(11)
  56. #define M2M_CONTROL_SAH BIT(12)
  57. #define M2M_CONTROL_PW_SHIFT 9
  58. #define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
  59. #define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
  60. #define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
  61. #define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
  62. #define M2M_CONTROL_TM_SHIFT 13
  63. #define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
  64. #define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
  65. #define M2M_CONTROL_NFBINT BIT(21)
  66. #define M2M_CONTROL_RSS_SHIFT 22
  67. #define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
  68. #define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
  69. #define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
  70. #define M2M_CONTROL_NO_HDSK BIT(24)
  71. #define M2M_CONTROL_PWSC_SHIFT 25
  72. #define M2M_INTERRUPT 0x0004
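/* DONE and NFB bits in the M2M interrupt register (see m2m_hw_interrupt()) */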
  73. #define M2M_INTERRUPT_MASK 6
  74. #define M2M_STATUS 0x000c
  75. #define M2M_STATUS_CTL_SHIFT 1
  76. #define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
  77. #define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
  78. #define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
  79. #define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
  80. #define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
  81. #define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
  82. #define M2M_STATUS_BUF_SHIFT 4
  83. #define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
  84. #define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
  85. #define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
  86. #define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
  87. #define M2M_STATUS_DONE BIT(6)
  88. #define M2M_BCR0 0x0010
  89. #define M2M_BCR1 0x0014
  90. #define M2M_SAR_BASE0 0x0018
  91. #define M2M_SAR_BASE1 0x001c
  92. #define M2M_DAR_BASE0 0x002c
  93. #define M2M_DAR_BASE1 0x0030
  94. #define DMA_MAX_CHAN_BYTES 0xffff
  95. #define DMA_MAX_CHAN_DESCRIPTORS 32
  96. struct ep93xx_dma_engine;
  97. /**
  98. * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
  99. * @src_addr: source address of the transaction
  100. * @dst_addr: destination address of the transaction
  101. * @size: size of the transaction (in bytes)
  102. * @complete: this descriptor is completed
  103. * @txd: dmaengine API descriptor
  104. * @tx_list: list of linked descriptors
  105. * @node: link used for putting this into a channel queue
  106. */
  107. struct ep93xx_dma_desc {
  108. u32 src_addr;
  109. u32 dst_addr;
  110. size_t size;
  111. bool complete;
  112. struct dma_async_tx_descriptor txd;
  113. struct list_head tx_list;
  114. struct list_head node;
  115. };
  116. /**
  117. * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
  118. * @chan: dmaengine API channel
* @edma: pointer to the engine device
  120. * @regs: memory mapped registers
  121. * @irq: interrupt number of the channel
  122. * @clk: clock used by this channel
  123. * @tasklet: channel specific tasklet used for callbacks
  124. * @lock: lock protecting the fields following
  125. * @flags: flags for the channel
  126. * @buffer: which buffer to use next (0/1)
  127. * @active: flattened chain of descriptors currently being processed
  128. * @queue: pending descriptors which are handled next
  129. * @free_list: list of free descriptors which can be used
  130. * @runtime_addr: physical address currently used as dest/src (M2M only). This
  131. * is set via .device_config before slave operation is
  132. * prepared
  133. * @runtime_ctrl: M2M runtime values for the control register.
  134. *
* As the EP93xx DMA controller doesn't support real chained DMA descriptors,
* we use a slightly different scheme here: @active points to the head of a
* flattened DMA descriptor chain.
  138. *
  139. * @queue holds pending transactions. These are linked through the first
  140. * descriptor in the chain. When a descriptor is moved to the @active queue,
  141. * the first and chained descriptors are flattened into a single list.
  142. *
  143. * @chan.private holds pointer to &struct ep93xx_dma_data which contains
  144. * necessary channel configuration information. For memcpy channels this must
  145. * be %NULL.
  146. */
  147. struct ep93xx_dma_chan {
  148. struct dma_chan chan;
  149. const struct ep93xx_dma_engine *edma;
  150. void __iomem *regs;
  151. int irq;
  152. struct clk *clk;
  153. struct tasklet_struct tasklet;
  154. /* protects the fields following */
  155. spinlock_t lock;
  156. unsigned long flags;
  157. /* Channel is configured for cyclic transfers */
  158. #define EP93XX_DMA_IS_CYCLIC 0
  159. int buffer;
  160. struct list_head active;
  161. struct list_head queue;
  162. struct list_head free_list;
  163. u32 runtime_addr;
  164. u32 runtime_ctrl;
  165. };
  166. /**
  167. * struct ep93xx_dma_engine - the EP93xx DMA engine instance
  168. * @dma_dev: holds the dmaengine device
  169. * @m2m: is this an M2M or M2P device
  170. * @hw_setup: method which sets the channel up for operation
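* @hw_synchronize: synchronizes DMA channel termination to current context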
  171. * @hw_shutdown: shuts the channel down and flushes whatever is left
  172. * @hw_submit: pushes active descriptor(s) to the hardware
  173. * @hw_interrupt: handle the interrupt
  174. * @num_channels: number of channels for this instance
  175. * @channels: array of channels
  176. *
  177. * There is one instance of this struct for the M2P channels and one for the
  178. * M2M channels. hw_xxx() methods are used to perform operations which are
  179. * different on M2M and M2P channels. These methods are called with channel
  180. * lock held and interrupts disabled so they cannot sleep.
  181. */
  182. struct ep93xx_dma_engine {
  183. struct dma_device dma_dev;
  184. bool m2m;
  185. int (*hw_setup)(struct ep93xx_dma_chan *);
  186. void (*hw_synchronize)(struct ep93xx_dma_chan *);
  187. void (*hw_shutdown)(struct ep93xx_dma_chan *);
  188. void (*hw_submit)(struct ep93xx_dma_chan *);
  189. int (*hw_interrupt)(struct ep93xx_dma_chan *);
  190. #define INTERRUPT_UNKNOWN 0
  191. #define INTERRUPT_DONE 1
  192. #define INTERRUPT_NEXT_BUFFER 2
  193. size_t num_channels;
  194. struct ep93xx_dma_chan channels[];
  195. };
  196. static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
  197. {
  198. return &edmac->chan.dev->device;
  199. }
  200. static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
  201. {
  202. return container_of(chan, struct ep93xx_dma_chan, chan);
  203. }
  204. /**
  205. * ep93xx_dma_set_active - set new active descriptor chain
  206. * @edmac: channel
  207. * @desc: head of the new active descriptor chain
  208. *
  209. * Sets @desc to be the head of the new active descriptor chain. This is the
  210. * chain which is processed next. The active list must be empty before calling
  211. * this function.
  212. *
  213. * Called with @edmac->lock held and interrupts disabled.
  214. */
  215. static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
  216. struct ep93xx_dma_desc *desc)
  217. {
  218. BUG_ON(!list_empty(&edmac->active));
  219. list_add_tail(&desc->node, &edmac->active);
  220. /* Flatten the @desc->tx_list chain into @edmac->active list */
  221. while (!list_empty(&desc->tx_list)) {
  222. struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
  223. struct ep93xx_dma_desc, node);
  224. /*
  225. * We copy the callback parameters from the first descriptor
  226. * to all the chained descriptors. This way we can call the
  227. * callback without having to find out the first descriptor in
  228. * the chain. Useful for cyclic transfers.
  229. */
  230. d->txd.callback = desc->txd.callback;
  231. d->txd.callback_param = desc->txd.callback_param;
  232. list_move_tail(&d->node, &edmac->active);
  233. }
  234. }
  235. /* Called with @edmac->lock held and interrupts disabled */
  236. static struct ep93xx_dma_desc *
  237. ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
  238. {
  239. return list_first_entry_or_null(&edmac->active,
  240. struct ep93xx_dma_desc, node);
  241. }
  242. /**
  243. * ep93xx_dma_advance_active - advances to the next active descriptor
  244. * @edmac: channel
  245. *
* Function advances the active descriptor to the next one in @edmac->active
* and returns %true if there are still descriptors in the chain to process.
* Otherwise returns %false.
*
* When the channel is in cyclic mode, this always returns %true.
  251. *
  252. * Called with @edmac->lock held and interrupts disabled.
  253. */
  254. static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
  255. {
  256. struct ep93xx_dma_desc *desc;
  257. list_rotate_left(&edmac->active);
  258. if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
  259. return true;
  260. desc = ep93xx_dma_get_active(edmac);
  261. if (!desc)
  262. return false;
  263. /*
  264. * If txd.cookie is set it means that we are back in the first
  265. * descriptor in the chain and hence done with it.
  266. */
  267. return !desc->txd.cookie;
  268. }
  269. /*
  270. * M2P DMA implementation
  271. */
  272. static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
  273. {
  274. writel(control, edmac->regs + M2P_CONTROL);
  275. /*
  276. * EP93xx User's Guide states that we must perform a dummy read after
  277. * write to the control register.
  278. */
  279. readl(edmac->regs + M2P_CONTROL);
  280. }
  281. static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
  282. {
  283. struct ep93xx_dma_data *data = edmac->chan.private;
  284. u32 control;
  285. writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
  286. control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
  287. | M2P_CONTROL_ENABLE;
  288. m2p_set_control(edmac, control);
  289. edmac->buffer = 0;
  290. return 0;
  291. }
  292. static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
  293. {
  294. return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
  295. }
  296. static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
  297. {
  298. unsigned long flags;
  299. u32 control;
  300. spin_lock_irqsave(&edmac->lock, flags);
  301. control = readl(edmac->regs + M2P_CONTROL);
  302. control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
  303. m2p_set_control(edmac, control);
  304. spin_unlock_irqrestore(&edmac->lock, flags);
  305. while (m2p_channel_state(edmac) >= M2P_STATE_ON)
  306. schedule();
  307. }
  308. static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
  309. {
  310. m2p_set_control(edmac, 0);
  311. while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
  312. dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
  313. }
  314. static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
  315. {
  316. struct ep93xx_dma_desc *desc;
  317. u32 bus_addr;
  318. desc = ep93xx_dma_get_active(edmac);
  319. if (!desc) {
  320. dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
  321. return;
  322. }
  323. if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
  324. bus_addr = desc->src_addr;
  325. else
  326. bus_addr = desc->dst_addr;
  327. if (edmac->buffer == 0) {
  328. writel(desc->size, edmac->regs + M2P_MAXCNT0);
  329. writel(bus_addr, edmac->regs + M2P_BASE0);
  330. } else {
  331. writel(desc->size, edmac->regs + M2P_MAXCNT1);
  332. writel(bus_addr, edmac->regs + M2P_BASE1);
  333. }
  334. edmac->buffer ^= 1;
  335. }
  336. static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
  337. {
  338. u32 control = readl(edmac->regs + M2P_CONTROL);
  339. m2p_fill_desc(edmac);
  340. control |= M2P_CONTROL_STALLINT;
  341. if (ep93xx_dma_advance_active(edmac)) {
  342. m2p_fill_desc(edmac);
  343. control |= M2P_CONTROL_NFBINT;
  344. }
  345. m2p_set_control(edmac, control);
  346. }
  347. static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
  348. {
  349. u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
  350. u32 control;
  351. if (irq_status & M2P_INTERRUPT_ERROR) {
  352. struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
  353. /* Clear the error interrupt */
  354. writel(1, edmac->regs + M2P_INTERRUPT);
  355. /*
  356. * It seems that there is no easy way of reporting errors back
* to the client, so we just report the error here and continue as
  358. * usual.
  359. *
  360. * Revisit this when there is a mechanism to report back the
  361. * errors.
  362. */
  363. dev_err(chan2dev(edmac),
  364. "DMA transfer failed! Details:\n"
  365. "\tcookie : %d\n"
  366. "\tsrc_addr : 0x%08x\n"
  367. "\tdst_addr : 0x%08x\n"
  368. "\tsize : %zu\n",
  369. desc->txd.cookie, desc->src_addr, desc->dst_addr,
  370. desc->size);
  371. }
  372. /*
* Even the latest E2 silicon revision sometimes asserts the STALL interrupt
* instead of NFB. Therefore we treat them equally, basing our decision on
* the amount of data we still have to transfer.
  376. */
  377. if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
  378. return INTERRUPT_UNKNOWN;
  379. if (ep93xx_dma_advance_active(edmac)) {
  380. m2p_fill_desc(edmac);
  381. return INTERRUPT_NEXT_BUFFER;
  382. }
  383. /* Disable interrupts */
  384. control = readl(edmac->regs + M2P_CONTROL);
  385. control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
  386. m2p_set_control(edmac, control);
  387. return INTERRUPT_DONE;
  388. }
  389. /*
  390. * M2M DMA implementation
  391. */
  392. static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
  393. {
  394. const struct ep93xx_dma_data *data = edmac->chan.private;
  395. u32 control = 0;
  396. if (!data) {
  397. /* This is memcpy channel, nothing to configure */
  398. writel(control, edmac->regs + M2M_CONTROL);
  399. return 0;
  400. }
  401. switch (data->port) {
  402. case EP93XX_DMA_SSP:
  403. /*
  404. * This was found via experimenting - anything less than 5
  405. * causes the channel to perform only a partial transfer which
  406. * leads to problems since we don't get DONE interrupt then.
  407. */
  408. control = (5 << M2M_CONTROL_PWSC_SHIFT);
  409. control |= M2M_CONTROL_NO_HDSK;
  410. if (data->direction == DMA_MEM_TO_DEV) {
  411. control |= M2M_CONTROL_DAH;
  412. control |= M2M_CONTROL_TM_TX;
  413. control |= M2M_CONTROL_RSS_SSPTX;
  414. } else {
  415. control |= M2M_CONTROL_SAH;
  416. control |= M2M_CONTROL_TM_RX;
  417. control |= M2M_CONTROL_RSS_SSPRX;
  418. }
  419. break;
  420. case EP93XX_DMA_IDE:
  421. /*
  422. * This IDE part is totally untested. Values below are taken
* from the EP93xx User's Guide and might not be correct.
  424. */
  425. if (data->direction == DMA_MEM_TO_DEV) {
  426. /* Worst case from the UG */
  427. control = (3 << M2M_CONTROL_PWSC_SHIFT);
  428. control |= M2M_CONTROL_DAH;
  429. control |= M2M_CONTROL_TM_TX;
  430. } else {
  431. control = (2 << M2M_CONTROL_PWSC_SHIFT);
  432. control |= M2M_CONTROL_SAH;
  433. control |= M2M_CONTROL_TM_RX;
  434. }
  435. control |= M2M_CONTROL_NO_HDSK;
  436. control |= M2M_CONTROL_RSS_IDE;
  437. control |= M2M_CONTROL_PW_16;
  438. break;
  439. default:
  440. return -EINVAL;
  441. }
  442. writel(control, edmac->regs + M2M_CONTROL);
  443. return 0;
  444. }
  445. static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
  446. {
  447. /* Just disable the channel */
  448. writel(0, edmac->regs + M2M_CONTROL);
  449. }
  450. static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
  451. {
  452. struct ep93xx_dma_desc *desc;
  453. desc = ep93xx_dma_get_active(edmac);
  454. if (!desc) {
  455. dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
  456. return;
  457. }
  458. if (edmac->buffer == 0) {
  459. writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
  460. writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
  461. writel(desc->size, edmac->regs + M2M_BCR0);
  462. } else {
  463. writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
  464. writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
  465. writel(desc->size, edmac->regs + M2M_BCR1);
  466. }
  467. edmac->buffer ^= 1;
  468. }
  469. static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
  470. {
  471. struct ep93xx_dma_data *data = edmac->chan.private;
  472. u32 control = readl(edmac->regs + M2M_CONTROL);
  473. /*
  474. * Since we allow clients to configure PW (peripheral width) we always
* clear PW bits here and then set them according to what is given in
  476. * the runtime configuration.
  477. */
  478. control &= ~M2M_CONTROL_PW_MASK;
  479. control |= edmac->runtime_ctrl;
  480. m2m_fill_desc(edmac);
  481. control |= M2M_CONTROL_DONEINT;
  482. if (ep93xx_dma_advance_active(edmac)) {
  483. m2m_fill_desc(edmac);
  484. control |= M2M_CONTROL_NFBINT;
  485. }
  486. /*
  487. * Now we can finally enable the channel. For M2M channel this must be
  488. * done _after_ the BCRx registers are programmed.
  489. */
  490. control |= M2M_CONTROL_ENABLE;
  491. writel(control, edmac->regs + M2M_CONTROL);
  492. if (!data) {
  493. /*
  494. * For memcpy channels the software trigger must be asserted
  495. * in order to start the memcpy operation.
  496. */
  497. control |= M2M_CONTROL_START;
  498. writel(control, edmac->regs + M2M_CONTROL);
  499. }
  500. }
  501. /*
* According to the EP93xx User's Guide, we should receive the DONE interrupt
* when all M2M DMA controller transactions complete normally. This is not
* always the case: sometimes the EP93xx M2M DMA asserts DONE while the DMA
* channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
* channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
* operation). In effect, disabling the channel when only the DONE bit is set
* could stop a currently running DMA transfer. To avoid this, we use the
* Buffer FSM and Control FSM to check the current state of the DMA channel.
  510. */
  511. static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
  512. {
  513. u32 status = readl(edmac->regs + M2M_STATUS);
  514. u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
  515. u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
  516. bool done = status & M2M_STATUS_DONE;
  517. bool last_done;
  518. u32 control;
  519. struct ep93xx_dma_desc *desc;
  520. /* Accept only DONE and NFB interrupts */
  521. if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
  522. return INTERRUPT_UNKNOWN;
  523. if (done) {
  524. /* Clear the DONE bit */
  525. writel(0, edmac->regs + M2M_INTERRUPT);
  526. }
  527. /*
  528. * Check whether we are done with descriptors or not. This, together
  529. * with DMA channel state, determines action to take in interrupt.
  530. */
  531. desc = ep93xx_dma_get_active(edmac);
  532. last_done = !desc || desc->txd.cookie;
  533. /*
  534. * Use M2M DMA Buffer FSM and Control FSM to check current state of
  535. * DMA channel. Using DONE and NFB bits from channel status register
  536. * or bits from channel interrupt register is not reliable.
  537. */
  538. if (!last_done &&
  539. (buf_fsm == M2M_STATUS_BUF_NO ||
  540. buf_fsm == M2M_STATUS_BUF_ON)) {
  541. /*
  542. * Two buffers are ready for update when Buffer FSM is in
  543. * DMA_NO_BUF state. Only one buffer can be prepared without
  544. * disabling the channel or polling the DONE bit.
  545. * To simplify things, always prepare only one buffer.
  546. */
  547. if (ep93xx_dma_advance_active(edmac)) {
  548. m2m_fill_desc(edmac);
  549. if (done && !edmac->chan.private) {
  550. /* Software trigger for memcpy channel */
  551. control = readl(edmac->regs + M2M_CONTROL);
  552. control |= M2M_CONTROL_START;
  553. writel(control, edmac->regs + M2M_CONTROL);
  554. }
  555. return INTERRUPT_NEXT_BUFFER;
  556. } else {
  557. last_done = true;
  558. }
  559. }
  560. /*
  561. * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
  562. * and Control FSM is in DMA_STALL state.
  563. */
  564. if (last_done &&
  565. buf_fsm == M2M_STATUS_BUF_NO &&
  566. ctl_fsm == M2M_STATUS_CTL_STALL) {
  567. /* Disable interrupts and the channel */
  568. control = readl(edmac->regs + M2M_CONTROL);
  569. control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
  570. | M2M_CONTROL_ENABLE);
  571. writel(control, edmac->regs + M2M_CONTROL);
  572. return INTERRUPT_DONE;
  573. }
  574. /*
  575. * Nothing to do this time.
  576. */
  577. return INTERRUPT_NEXT_BUFFER;
  578. }
  579. /*
  580. * DMA engine API implementation
  581. */
  582. static struct ep93xx_dma_desc *
  583. ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
  584. {
  585. struct ep93xx_dma_desc *desc, *_desc;
  586. struct ep93xx_dma_desc *ret = NULL;
  587. unsigned long flags;
  588. spin_lock_irqsave(&edmac->lock, flags);
  589. list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
  590. if (async_tx_test_ack(&desc->txd)) {
  591. list_del_init(&desc->node);
  592. /* Re-initialize the descriptor */
  593. desc->src_addr = 0;
  594. desc->dst_addr = 0;
  595. desc->size = 0;
  596. desc->complete = false;
  597. desc->txd.cookie = 0;
  598. desc->txd.callback = NULL;
  599. desc->txd.callback_param = NULL;
  600. ret = desc;
  601. break;
  602. }
  603. }
  604. spin_unlock_irqrestore(&edmac->lock, flags);
  605. return ret;
  606. }
  607. static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
  608. struct ep93xx_dma_desc *desc)
  609. {
  610. if (desc) {
  611. unsigned long flags;
  612. spin_lock_irqsave(&edmac->lock, flags);
  613. list_splice_init(&desc->tx_list, &edmac->free_list);
  614. list_add(&desc->node, &edmac->free_list);
  615. spin_unlock_irqrestore(&edmac->lock, flags);
  616. }
  617. }
  618. /**
  619. * ep93xx_dma_advance_work - start processing the next pending transaction
  620. * @edmac: channel
  621. *
  622. * If we have pending transactions queued and we are currently idling, this
  623. * function takes the next queued transaction from the @edmac->queue and
  624. * pushes it to the hardware for execution.
  625. */
  626. static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
  627. {
  628. struct ep93xx_dma_desc *new;
  629. unsigned long flags;
  630. spin_lock_irqsave(&edmac->lock, flags);
  631. if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
  632. spin_unlock_irqrestore(&edmac->lock, flags);
  633. return;
  634. }
  635. /* Take the next descriptor from the pending queue */
  636. new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
  637. list_del_init(&new->node);
  638. ep93xx_dma_set_active(edmac, new);
  639. /* Push it to the hardware */
  640. edmac->edma->hw_submit(edmac);
  641. spin_unlock_irqrestore(&edmac->lock, flags);
  642. }
  643. static void ep93xx_dma_tasklet(unsigned long data)
  644. {
  645. struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
  646. struct ep93xx_dma_desc *desc, *d;
  647. struct dmaengine_desc_callback cb;
  648. LIST_HEAD(list);
  649. memset(&cb, 0, sizeof(cb));
  650. spin_lock_irq(&edmac->lock);
  651. /*
  652. * If dma_terminate_all() was called before we get to run, the active
  653. * list has become empty. If that happens we aren't supposed to do
  654. * anything more than call ep93xx_dma_advance_work().
  655. */
  656. desc = ep93xx_dma_get_active(edmac);
  657. if (desc) {
  658. if (desc->complete) {
  659. /* mark descriptor complete for non cyclic case only */
  660. if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
  661. dma_cookie_complete(&desc->txd);
  662. list_splice_init(&edmac->active, &list);
  663. }
  664. dmaengine_desc_get_callback(&desc->txd, &cb);
  665. }
  666. spin_unlock_irq(&edmac->lock);
  667. /* Pick up the next descriptor from the queue */
  668. ep93xx_dma_advance_work(edmac);
  669. /* Now we can release all the chained descriptors */
  670. list_for_each_entry_safe(desc, d, &list, node) {
  671. dma_descriptor_unmap(&desc->txd);
  672. ep93xx_dma_desc_put(edmac, desc);
  673. }
  674. dmaengine_desc_callback_invoke(&cb, NULL);
  675. }
  676. static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
  677. {
  678. struct ep93xx_dma_chan *edmac = dev_id;
  679. struct ep93xx_dma_desc *desc;
  680. irqreturn_t ret = IRQ_HANDLED;
  681. spin_lock(&edmac->lock);
  682. desc = ep93xx_dma_get_active(edmac);
  683. if (!desc) {
  684. dev_warn(chan2dev(edmac),
  685. "got interrupt while active list is empty\n");
  686. spin_unlock(&edmac->lock);
  687. return IRQ_NONE;
  688. }
  689. switch (edmac->edma->hw_interrupt(edmac)) {
  690. case INTERRUPT_DONE:
  691. desc->complete = true;
  692. tasklet_schedule(&edmac->tasklet);
  693. break;
  694. case INTERRUPT_NEXT_BUFFER:
  695. if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
  696. tasklet_schedule(&edmac->tasklet);
  697. break;
  698. default:
  699. dev_warn(chan2dev(edmac), "unknown interrupt!\n");
  700. ret = IRQ_NONE;
  701. break;
  702. }
  703. spin_unlock(&edmac->lock);
  704. return ret;
  705. }
  706. /**
  707. * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
  708. * @tx: descriptor to be executed
  709. *
* Function will execute the given descriptor on the hardware or, if the
* hardware is busy, queue the descriptor to be executed later. Returns a
* cookie which can be used to poll the status of the descriptor.
  713. */
  714. static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
  715. {
  716. struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
  717. struct ep93xx_dma_desc *desc;
  718. dma_cookie_t cookie;
  719. unsigned long flags;
  720. spin_lock_irqsave(&edmac->lock, flags);
  721. cookie = dma_cookie_assign(tx);
  722. desc = container_of(tx, struct ep93xx_dma_desc, txd);
  723. /*
* If nothing is currently being processed, we push this descriptor
  725. * directly to the hardware. Otherwise we put the descriptor
  726. * to the pending queue.
  727. */
  728. if (list_empty(&edmac->active)) {
  729. ep93xx_dma_set_active(edmac, desc);
  730. edmac->edma->hw_submit(edmac);
  731. } else {
  732. list_add_tail(&desc->node, &edmac->queue);
  733. }
  734. spin_unlock_irqrestore(&edmac->lock, flags);
  735. return cookie;
  736. }
  737. /**
  738. * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
  739. * @chan: channel to allocate resources
  740. *
* Function allocates the necessary resources for the given DMA channel and
* returns the number of allocated descriptors for the channel. A negative
* errno is returned in case of failure.
  744. */
  745. static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
  746. {
  747. struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
  748. struct ep93xx_dma_data *data = chan->private;
  749. const char *name = dma_chan_name(chan);
  750. int ret, i;
  751. /* Sanity check the channel parameters */
  752. if (!edmac->edma->m2m) {
  753. if (!data)
  754. return -EINVAL;
  755. if (data->port < EP93XX_DMA_I2S1 ||
  756. data->port > EP93XX_DMA_IRDA)
  757. return -EINVAL;
  758. if (data->direction != ep93xx_dma_chan_direction(chan))
  759. return -EINVAL;
  760. } else {
  761. if (data) {
  762. switch (data->port) {
  763. case EP93XX_DMA_SSP:
  764. case EP93XX_DMA_IDE:
  765. if (!is_slave_direction(data->direction))
  766. return -EINVAL;
  767. break;
  768. default:
  769. return -EINVAL;
  770. }
  771. }
  772. }
  773. if (data && data->name)
  774. name = data->name;
  775. ret = clk_enable(edmac->clk);
  776. if (ret)
  777. return ret;
  778. ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
  779. if (ret)
  780. goto fail_clk_disable;
  781. spin_lock_irq(&edmac->lock);
  782. dma_cookie_init(&edmac->chan);
  783. ret = edmac->edma->hw_setup(edmac);
  784. spin_unlock_irq(&edmac->lock);
  785. if (ret)
  786. goto fail_free_irq;
  787. for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
  788. struct ep93xx_dma_desc *desc;
  789. desc = kzalloc(sizeof(*desc), GFP_KERNEL);
  790. if (!desc) {
  791. dev_warn(chan2dev(edmac), "not enough descriptors\n");
  792. break;
  793. }
  794. INIT_LIST_HEAD(&desc->tx_list);
  795. dma_async_tx_descriptor_init(&desc->txd, chan);
  796. desc->txd.flags = DMA_CTRL_ACK;
  797. desc->txd.tx_submit = ep93xx_dma_tx_submit;
  798. ep93xx_dma_desc_put(edmac, desc);
  799. }
  800. return i;
  801. fail_free_irq:
  802. free_irq(edmac->irq, edmac);
  803. fail_clk_disable:
  804. clk_disable(edmac->clk);
  805. return ret;
  806. }
  807. /**
  808. * ep93xx_dma_free_chan_resources - release resources for the channel
  809. * @chan: channel
  810. *
  811. * Function releases all the resources allocated for the given channel.
  812. * The channel must be idle when this is called.
  813. */
  814. static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
  815. {
  816. struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
  817. struct ep93xx_dma_desc *desc, *d;
  818. unsigned long flags;
  819. LIST_HEAD(list);
  820. BUG_ON(!list_empty(&edmac->active));
  821. BUG_ON(!list_empty(&edmac->queue));
  822. spin_lock_irqsave(&edmac->lock, flags);
  823. edmac->edma->hw_shutdown(edmac);
  824. edmac->runtime_addr = 0;
  825. edmac->runtime_ctrl = 0;
  826. edmac->buffer = 0;
  827. list_splice_init(&edmac->free_list, &list);
  828. spin_unlock_irqrestore(&edmac->lock, flags);
  829. list_for_each_entry_safe(desc, d, &list, node)
  830. kfree(desc);
  831. clk_disable(edmac->clk);
  832. free_irq(edmac->irq, edmac);
  833. }
  834. /**
  835. * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
  836. * @chan: channel
  837. * @dest: destination bus address
  838. * @src: source bus address
  839. * @len: size of the transaction
  840. * @flags: flags for the descriptor
  841. *
  842. * Returns a valid DMA descriptor or %NULL in case of failure.
  843. */
  844. static struct dma_async_tx_descriptor *
  845. ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
  846. dma_addr_t src, size_t len, unsigned long flags)
  847. {
  848. struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
  849. struct ep93xx_dma_desc *desc, *first;
  850. size_t bytes, offset;
  851. first = NULL;
  852. for (offset = 0; offset < len; offset += bytes) {
  853. desc = ep93xx_dma_desc_get(edmac);
  854. if (!desc) {
dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
  856. goto fail;
  857. }
  858. bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
  859. desc->src_addr = src + offset;
  860. desc->dst_addr = dest + offset;
  861. desc->size = bytes;
  862. if (!first)
  863. first = desc;
  864. else
  865. list_add_tail(&desc->node, &first->tx_list);
  866. }
  867. first->txd.cookie = -EBUSY;
  868. first->txd.flags = flags;
  869. return &first->txd;
  870. fail:
  871. ep93xx_dma_desc_put(edmac, first);
  872. return NULL;
  873. }
  874. /**
  875. * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
  876. * @chan: channel
  877. * @sgl: list of buffers to transfer
  878. * @sg_len: number of entries in @sgl
* @dir: direction of the DMA transfer
  880. * @flags: flags for the descriptor
  881. * @context: operation context (ignored)
  882. *
  883. * Returns a valid DMA descriptor or %NULL in case of failure.
  884. */
  885. static struct dma_async_tx_descriptor *
  886. ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
  887. unsigned int sg_len, enum dma_transfer_direction dir,
  888. unsigned long flags, void *context)
  889. {
  890. struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
  891. struct ep93xx_dma_desc *desc, *first;
  892. struct scatterlist *sg;
  893. int i;
  894. if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
  895. dev_warn(chan2dev(edmac),
  896. "channel was configured with different direction\n");
  897. return NULL;
  898. }
  899. if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
  900. dev_warn(chan2dev(edmac),
  901. "channel is already used for cyclic transfers\n");
  902. return NULL;
  903. }
  904. first = NULL;
  905. for_each_sg(sgl, sg, sg_len, i) {
  906. size_t len = sg_dma_len(sg);
  907. if (len > DMA_MAX_CHAN_BYTES) {
  908. dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
  909. len);
  910. goto fail;
  911. }
  912. desc = ep93xx_dma_desc_get(edmac);
  913. if (!desc) {
dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
  915. goto fail;
  916. }
  917. if (dir == DMA_MEM_TO_DEV) {
  918. desc->src_addr = sg_dma_address(sg);
  919. desc->dst_addr = edmac->runtime_addr;
  920. } else {
  921. desc->src_addr = edmac->runtime_addr;
  922. desc->dst_addr = sg_dma_address(sg);
  923. }
  924. desc->size = len;
  925. if (!first)
  926. first = desc;
  927. else
  928. list_add_tail(&desc->node, &first->tx_list);
  929. }
  930. first->txd.cookie = -EBUSY;
  931. first->txd.flags = flags;
  932. return &first->txd;
  933. fail:
  934. ep93xx_dma_desc_put(edmac, first);
  935. return NULL;
  936. }
  937. /**
  938. * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
  939. * @chan: channel
  940. * @dma_addr: DMA mapped address of the buffer
  941. * @buf_len: length of the buffer (in bytes)
  942. * @period_len: length of a single period
  943. * @dir: direction of the operation
  944. * @flags: tx descriptor status flags
  945. *
* Prepares a descriptor for a cyclic DMA operation. This means that once the
* descriptor is submitted, we will keep submitting @period_len sized buffers
* and calling the callback once a period has elapsed. The transfer terminates
* only when the client calls dmaengine_terminate_all() for this channel.
  951. *
  952. * Returns a valid DMA descriptor or %NULL in case of failure.
  953. */
  954. static struct dma_async_tx_descriptor *
  955. ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
  956. size_t buf_len, size_t period_len,
  957. enum dma_transfer_direction dir, unsigned long flags)
  958. {
  959. struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
  960. struct ep93xx_dma_desc *desc, *first;
  961. size_t offset = 0;
  962. if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
  963. dev_warn(chan2dev(edmac),
  964. "channel was configured with different direction\n");
  965. return NULL;
  966. }
  967. if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
  968. dev_warn(chan2dev(edmac),
  969. "channel is already used for cyclic transfers\n");
  970. return NULL;
  971. }
  972. if (period_len > DMA_MAX_CHAN_BYTES) {
  973. dev_warn(chan2dev(edmac), "too big period length %zu\n",
  974. period_len);
  975. return NULL;
  976. }
  977. /* Split the buffer into period size chunks */
  978. first = NULL;
  979. for (offset = 0; offset < buf_len; offset += period_len) {
  980. desc = ep93xx_dma_desc_get(edmac);
  981. if (!desc) {
dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
  983. goto fail;
  984. }
  985. if (dir == DMA_MEM_TO_DEV) {
  986. desc->src_addr = dma_addr + offset;
  987. desc->dst_addr = edmac->runtime_addr;
  988. } else {
  989. desc->src_addr = edmac->runtime_addr;
  990. desc->dst_addr = dma_addr + offset;
  991. }
  992. desc->size = period_len;
  993. if (!first)
  994. first = desc;
  995. else
  996. list_add_tail(&desc->node, &first->tx_list);
  997. }
  998. first->txd.cookie = -EBUSY;
  999. return &first->txd;
  1000. fail:
  1001. ep93xx_dma_desc_put(edmac, first);
  1002. return NULL;
  1003. }
  1004. /**
  1005. * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
  1006. * current context.
  1007. * @chan: channel
  1008. *
  1009. * Synchronizes the DMA channel termination to the current context. When this
  1010. * function returns it is guaranteed that all transfers for previously issued
* descriptors have stopped and it is safe to free the memory associated
  1012. * with them. Furthermore it is guaranteed that all complete callback functions
  1013. * for a previously submitted descriptor have finished running and it is safe to
  1014. * free resources accessed from within the complete callbacks.
  1015. */
  1016. static void ep93xx_dma_synchronize(struct dma_chan *chan)
  1017. {
  1018. struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
  1019. if (edmac->edma->hw_synchronize)
  1020. edmac->edma->hw_synchronize(edmac);
  1021. }
  1022. /**
  1023. * ep93xx_dma_terminate_all - terminate all transactions
  1024. * @chan: channel
  1025. *
  1026. * Stops all DMA transactions. All descriptors are put back to the
  1027. * @edmac->free_list and callbacks are _not_ called.
  1028. */
  1029. static int ep93xx_dma_terminate_all(struct dma_chan *chan)
  1030. {
  1031. struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
  1032. struct ep93xx_dma_desc *desc, *_d;
  1033. unsigned long flags;
  1034. LIST_HEAD(list);
  1035. spin_lock_irqsave(&edmac->lock, flags);
  1036. /* First we disable and flush the DMA channel */
  1037. edmac->edma->hw_shutdown(edmac);
  1038. clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
  1039. list_splice_init(&edmac->active, &list);
  1040. list_splice_init(&edmac->queue, &list);
  1041. /*
  1042. * We then re-enable the channel. This way we can continue submitting
  1043. * the descriptors by just calling ->hw_submit() again.
  1044. */
  1045. edmac->edma->hw_setup(edmac);
  1046. spin_unlock_irqrestore(&edmac->lock, flags);
  1047. list_for_each_entry_safe(desc, _d, &list, node)
  1048. ep93xx_dma_desc_put(edmac, desc);
  1049. return 0;
  1050. }
  1051. static int ep93xx_dma_slave_config(struct dma_chan *chan,
  1052. struct dma_slave_config *config)
  1053. {
  1054. struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
  1055. enum dma_slave_buswidth width;
  1056. unsigned long flags;
  1057. u32 addr, ctrl;
  1058. if (!edmac->edma->m2m)
  1059. return -EINVAL;
  1060. switch (config->direction) {
  1061. case DMA_DEV_TO_MEM:
  1062. width = config->src_addr_width;
  1063. addr = config->src_addr;
  1064. break;
  1065. case DMA_MEM_TO_DEV:
  1066. width = config->dst_addr_width;
  1067. addr = config->dst_addr;
  1068. break;
  1069. default:
  1070. return -EINVAL;
  1071. }
  1072. switch (width) {
  1073. case DMA_SLAVE_BUSWIDTH_1_BYTE:
  1074. ctrl = 0;
  1075. break;
  1076. case DMA_SLAVE_BUSWIDTH_2_BYTES:
  1077. ctrl = M2M_CONTROL_PW_16;
  1078. break;
  1079. case DMA_SLAVE_BUSWIDTH_4_BYTES:
  1080. ctrl = M2M_CONTROL_PW_32;
  1081. break;
  1082. default:
  1083. return -EINVAL;
  1084. }
  1085. spin_lock_irqsave(&edmac->lock, flags);
  1086. edmac->runtime_addr = addr;
  1087. edmac->runtime_ctrl = ctrl;
  1088. spin_unlock_irqrestore(&edmac->lock, flags);
  1089. return 0;
  1090. }
  1091. /**
  1092. * ep93xx_dma_tx_status - check if a transaction is completed
  1093. * @chan: channel
  1094. * @cookie: transaction specific cookie
  1095. * @state: state of the transaction is stored here if given
  1096. *
  1097. * This function can be used to query state of a given transaction.
  1098. */
  1099. static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
  1100. dma_cookie_t cookie,
  1101. struct dma_tx_state *state)
  1102. {
  1103. return dma_cookie_status(chan, cookie, state);
  1104. }
  1105. /**
  1106. * ep93xx_dma_issue_pending - push pending transactions to the hardware
  1107. * @chan: channel
  1108. *
  1109. * When this function is called, all pending transactions are pushed to the
  1110. * hardware and executed.
  1111. */
  1112. static void ep93xx_dma_issue_pending(struct dma_chan *chan)
  1113. {
  1114. ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
  1115. }
  1116. static int __init ep93xx_dma_probe(struct platform_device *pdev)
  1117. {
  1118. struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
  1119. struct ep93xx_dma_engine *edma;
  1120. struct dma_device *dma_dev;
  1121. size_t edma_size;
  1122. int ret, i;
  1123. edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
  1124. edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
  1125. if (!edma)
  1126. return -ENOMEM;
  1127. dma_dev = &edma->dma_dev;
  1128. edma->m2m = platform_get_device_id(pdev)->driver_data;
  1129. edma->num_channels = pdata->num_channels;
  1130. INIT_LIST_HEAD(&dma_dev->channels);
  1131. for (i = 0; i < pdata->num_channels; i++) {
  1132. const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
  1133. struct ep93xx_dma_chan *edmac = &edma->channels[i];
  1134. edmac->chan.device = dma_dev;
  1135. edmac->regs = cdata->base;
  1136. edmac->irq = cdata->irq;
  1137. edmac->edma = edma;
  1138. edmac->clk = clk_get(NULL, cdata->name);
  1139. if (IS_ERR(edmac->clk)) {
  1140. dev_warn(&pdev->dev, "failed to get clock for %s\n",
  1141. cdata->name);
  1142. continue;
  1143. }
  1144. spin_lock_init(&edmac->lock);
  1145. INIT_LIST_HEAD(&edmac->active);
  1146. INIT_LIST_HEAD(&edmac->queue);
  1147. INIT_LIST_HEAD(&edmac->free_list);
  1148. tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
  1149. (unsigned long)edmac);
  1150. list_add_tail(&edmac->chan.device_node,
  1151. &dma_dev->channels);
  1152. }
  1153. dma_cap_zero(dma_dev->cap_mask);
  1154. dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
  1155. dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
  1156. dma_dev->dev = &pdev->dev;
  1157. dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
  1158. dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
  1159. dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
  1160. dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
  1161. dma_dev->device_config = ep93xx_dma_slave_config;
  1162. dma_dev->device_synchronize = ep93xx_dma_synchronize;
  1163. dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
  1164. dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
  1165. dma_dev->device_tx_status = ep93xx_dma_tx_status;
  1166. dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
  1167. if (edma->m2m) {
  1168. dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
  1169. dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
  1170. edma->hw_setup = m2m_hw_setup;
  1171. edma->hw_shutdown = m2m_hw_shutdown;
  1172. edma->hw_submit = m2m_hw_submit;
  1173. edma->hw_interrupt = m2m_hw_interrupt;
  1174. } else {
  1175. dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
  1176. edma->hw_synchronize = m2p_hw_synchronize;
  1177. edma->hw_setup = m2p_hw_setup;
  1178. edma->hw_shutdown = m2p_hw_shutdown;
  1179. edma->hw_submit = m2p_hw_submit;
  1180. edma->hw_interrupt = m2p_hw_interrupt;
  1181. }
  1182. ret = dma_async_device_register(dma_dev);
  1183. if (unlikely(ret)) {
  1184. for (i = 0; i < edma->num_channels; i++) {
  1185. struct ep93xx_dma_chan *edmac = &edma->channels[i];
  1186. if (!IS_ERR_OR_NULL(edmac->clk))
  1187. clk_put(edmac->clk);
  1188. }
  1189. kfree(edma);
  1190. } else {
  1191. dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
  1192. edma->m2m ? "M" : "P");
  1193. }
  1194. return ret;
  1195. }
  1196. static const struct platform_device_id ep93xx_dma_driver_ids[] = {
  1197. { "ep93xx-dma-m2p", 0 },
  1198. { "ep93xx-dma-m2m", 1 },
  1199. { },
  1200. };
  1201. static struct platform_driver ep93xx_dma_driver = {
  1202. .driver = {
  1203. .name = "ep93xx-dma",
  1204. },
  1205. .id_table = ep93xx_dma_driver_ids,
  1206. };
  1207. static int __init ep93xx_dma_module_init(void)
  1208. {
  1209. return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
  1210. }
  1211. subsys_initcall(ep93xx_dma_module_init);
  1212. MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
  1213. MODULE_DESCRIPTION("EP93xx DMA driver");
  1214. MODULE_LICENSE("GPL");
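/*
 * A minimal, hypothetical usage sketch (kept under "#if 0", not part of the
 * driver itself): it shows how a client driver might claim an EP93xx DMA
 * channel through the generic dmaengine API by passing a struct
 * ep93xx_dma_data via chan->private, which ep93xx_dma_alloc_chan_resources()
 * above then validates. The names ep93xx_xxx_dma_filter and
 * ep93xx_xxx_request_tx_chan are made up for illustration; a real filter
 * would also check that the offered channel is of the expected type. On the
 * returned channel the client would then use dmaengine_slave_config() and
 * dmaengine_prep_slave_sg()/dmaengine_prep_dma_cyclic() as usual, which end
 * up in the callbacks registered in ep93xx_dma_probe().
 */
#if 0
static bool ep93xx_xxx_dma_filter(struct dma_chan *chan, void *filter_param)
{
	/* Hand the channel configuration to the EP93xx DMA driver */
	chan->private = filter_param;
	return true;
}

static struct dma_chan *ep93xx_xxx_request_tx_chan(struct ep93xx_dma_data *data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* SSP transmit towards the device; served by an M2M channel */
	data->port = EP93XX_DMA_SSP;
	data->direction = DMA_MEM_TO_DEV;
	data->name = "xxx-tx";

	return dma_request_channel(mask, ep93xx_xxx_dma_filter, data);
}
#endif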