/*
 * Copyright (C) 2014 Emilio López
 * Emilio López <emilio@elopez.com.ar>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"
/** Common macros to normal and dedicated DMA registers **/

#define SUN4I_DMA_CFG_LOADING			BIT(31)
#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 25)
#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)	((len) << 23)
#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)	((mode) << 21)
#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)	((type) << 16)
#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 9)
#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)	((len) << 7)
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)	((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)	(type)
/** Normal DMA register values **/

/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM		0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

/** Normal DMA register layout **/
/* Normal DMA source/destination address mode values */
#define SUN4I_NDMA_ADDR_MODE_LINEAR		0
#define SUN4I_NDMA_ADDR_MODE_IO			1

/* Normal DMA configuration register layout */
#define SUN4I_NDMA_CFG_CONT_MODE		BIT(30)
#define SUN4I_NDMA_CFG_WAIT_STATE(n)		((n) << 27)
#define SUN4I_NDMA_CFG_DST_NON_SECURE		BIT(22)
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE		BIT(6)
/** Dedicated DMA register values **/

/* Dedicated DMA source/destination address mode values */
#define SUN4I_DDMA_ADDR_MODE_LINEAR		0
#define SUN4I_DDMA_ADDR_MODE_IO			1
#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE	2
#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE	3

/* Dedicated DMA source/destination data request type values */
#define SUN4I_DDMA_DRQ_TYPE_SDRAM		0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

/** Dedicated DMA register layout **/

/* Dedicated DMA configuration register layout */
#define SUN4I_DDMA_CFG_BUSY			BIT(30)
#define SUN4I_DDMA_CFG_CONT_MODE		BIT(29)
#define SUN4I_DDMA_CFG_DST_NON_SECURE		BIT(28)
#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_DDMA_CFG_SRC_NON_SECURE		BIT(12)

/* Dedicated DMA parameter register layout */
#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)	(((n) - 1) << 24)
#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)	(((n) - 1) << 16)
#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)	(((n) - 1) << 8)
#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)	(((n) - 1) << 0)
/** DMA register offsets **/

/* General register offsets */
#define SUN4I_DMA_IRQ_ENABLE_REG		0x0
#define SUN4I_DMA_IRQ_PENDING_STATUS_REG	0x4

/* Normal DMA register offsets */
#define SUN4I_NDMA_CHANNEL_REG_BASE(n)		(0x100 + (n) * 0x20)
#define SUN4I_NDMA_CFG_REG			0x0
#define SUN4I_NDMA_SRC_ADDR_REG			0x4
#define SUN4I_NDMA_DST_ADDR_REG			0x8
#define SUN4I_NDMA_BYTE_COUNT_REG		0xC

/* Dedicated DMA register offsets */
#define SUN4I_DDMA_CHANNEL_REG_BASE(n)		(0x300 + (n) * 0x20)
#define SUN4I_DDMA_CFG_REG			0x0
#define SUN4I_DDMA_SRC_ADDR_REG			0x4
#define SUN4I_DDMA_DST_ADDR_REG			0x8
#define SUN4I_DDMA_BYTE_COUNT_REG		0xC
#define SUN4I_DDMA_PARA_REG			0x18
/** DMA Driver **/

/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
 * that's 16 channels. As for endpoints, there are 29 and 21
 * respectively. Given that the Normal DMA endpoints (other than
 * SDRAM) can be used as tx/rx, we need 78 vchans in total.
 */
#define SUN4I_NDMA_NR_MAX_CHANNELS	8
#define SUN4I_DDMA_NR_MAX_CHANNELS	8
#define SUN4I_DMA_NR_MAX_CHANNELS					\
	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
#define SUN4I_DDMA_NR_MAX_VCHANS	21
#define SUN4I_DMA_NR_MAX_VCHANS						\
	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
/*
 * These SUN4I_DDMA timing parameters were found experimentally while
 * working with the SPI driver and seem to make it behave correctly.
 */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS			\
	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |		\
	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |		\
	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |		\
	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
struct sun4i_dma_pchan {
	/* Register base of channel */
	void __iomem			*base;
	/* vchan currently being serviced */
	struct sun4i_dma_vchan		*vchan;
	/* Is this a dedicated pchan? */
	int				is_dedicated;
};
struct sun4i_dma_vchan {
	struct virt_dma_chan		vc;
	struct dma_slave_config		cfg;
	struct sun4i_dma_pchan		*pchan;
	struct sun4i_dma_promise	*processing;
	struct sun4i_dma_contract	*contract;
	u8				endpoint;
	int				is_dedicated;
};
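
/*
 * A promise holds everything needed to program one hardware transfer:
 * the precomputed configuration and parameter register values, the
 * source/destination addresses and the transfer length.
 */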
struct sun4i_dma_promise {
	u32				cfg;
	u32				para;
	dma_addr_t			src;
	dma_addr_t			dst;
	size_t				len;
	struct list_head		list;
};
/* A contract is a set of promises */
struct sun4i_dma_contract {
	struct virt_dma_desc		vd;
	struct list_head		demands;
	struct list_head		completed_demands;
	int				is_cyclic;
};
struct sun4i_dma_dev {
	DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
	struct dma_device		slave;
	struct sun4i_dma_pchan		*pchans;
	struct sun4i_dma_vchan		*vchans;
	void __iomem			*base;
	struct clk			*clk;
	int				irq;
	spinlock_t			lock;
};
static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
{
	return container_of(dev, struct sun4i_dma_dev, slave);
}

static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
}

static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sun4i_dma_contract, vd);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static int convert_burst(u32 maxburst)
{
	if (maxburst > 8)
		return -EINVAL;

	/* 1 -> 0, 4 -> 1, 8 -> 2 */
	return (maxburst >> 2);
}

static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
		return -EINVAL;

	/* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
	return (addr_width >> 1);
}
static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	vchan_free_chan_resources(&vchan->vc);
}
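
/*
 * Find a free pchan of the right type (normal or dedicated) for a
 * vchan, mark it as used and attach it to the vchan. Returns NULL if
 * all suitable pchans are currently busy.
 */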
static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
						  struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
	unsigned long flags;
	int i, max;

	/*
	 * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
	 * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
	 */
	if (vchan->is_dedicated) {
		i = SUN4I_NDMA_NR_MAX_CHANNELS;
		max = SUN4I_DMA_NR_MAX_CHANNELS;
	} else {
		i = 0;
		max = SUN4I_NDMA_NR_MAX_CHANNELS;
	}

	spin_lock_irqsave(&priv->lock, flags);
	for_each_clear_bit_from(i, priv->pchans_used, max) {
		pchan = &pchans[i];
		pchan->vchan = vchan;
		set_bit(i, priv->pchans_used);
		break;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return pchan;
}
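
/* Mark a pchan as free again so another vchan can pick it up */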
static void release_pchan(struct sun4i_dma_dev *priv,
			  struct sun4i_dma_pchan *pchan)
{
	unsigned long flags;
	int nr = pchan - priv->pchans;

	spin_lock_irqsave(&priv->lock, flags);

	pchan->vchan = NULL;
	clear_bit(nr, priv->pchans_used);

	spin_unlock_irqrestore(&priv->lock, flags);
}
static void configure_pchan(struct sun4i_dma_pchan *pchan,
			    struct sun4i_dma_promise *d)
{
	/*
	 * Configure addresses and misc parameters depending on type;
	 * SUN4I_DDMA has an extra field with timing parameters
	 */
	if (pchan->is_dedicated) {
		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
	} else {
		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
	}
}
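
/*
 * Each pchan owns two consecutive bits in the IRQ enable register:
 * bit (2 * n) is the half-done interrupt and bit (2 * n + 1) the
 * end-of-transfer interrupt for pchan n. This helper sets both.
 */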
static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
				struct sun4i_dma_pchan *pchan,
				int half, int end)
{
	u32 reg;
	int pchan_number = pchan - priv->pchans;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	if (half)
		reg |= BIT(pchan_number * 2);
	else
		reg &= ~BIT(pchan_number * 2);

	if (end)
		reg |= BIT(pchan_number * 2 + 1);
	else
		reg &= ~BIT(pchan_number * 2 + 1);

	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	spin_unlock_irqrestore(&priv->lock, flags);
}
/*
 * Execute pending operations on a vchan
 *
 * When given a vchan, this function will try to acquire a suitable
 * pchan and, if successful, will configure it to fulfill a promise
 * from the next pending contract.
 *
 * This function must be called with &vchan->vc.lock held.
 */
static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
				   struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_promise *promise = NULL;
	struct sun4i_dma_contract *contract = NULL;
	struct sun4i_dma_pchan *pchan;
	struct virt_dma_desc *vd;
	int ret;

	lockdep_assert_held(&vchan->vc.lock);

	/* We need a pchan to do anything, so secure one if available */
	pchan = find_and_use_pchan(priv, vchan);
	if (!pchan)
		return -EBUSY;

	/*
	 * Channel endpoints must not be repeated, so if this vchan
	 * has already submitted some work, we can't do anything else
	 */
	if (vchan->processing) {
		dev_dbg(chan2dev(&vchan->vc.chan),
			"processing something to this endpoint already\n");
		ret = -EBUSY;
		goto release_pchan;
	}

	do {
		/* Figure out which contract we're working with today */
		vd = vchan_next_desc(&vchan->vc);
		if (!vd) {
			dev_dbg(chan2dev(&vchan->vc.chan),
				"No pending contract found");
			ret = 0;
			goto release_pchan;
		}

		contract = to_sun4i_dma_contract(vd);
		if (list_empty(&contract->demands)) {
			/* The contract has been completed so mark it as such */
			list_del(&contract->vd.node);
			vchan_cookie_complete(&contract->vd);
			dev_dbg(chan2dev(&vchan->vc.chan),
				"Empty contract found and marked complete");
		}
	} while (list_empty(&contract->demands));

	/* Now find out what we need to do */
	promise = list_first_entry(&contract->demands,
				   struct sun4i_dma_promise, list);
	vchan->processing = promise;

	/* ... and make it reality */
	if (promise) {
		vchan->contract = contract;
		vchan->pchan = pchan;
		set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
		configure_pchan(pchan, promise);
	}

	return 0;

release_pchan:
	release_pchan(priv, pchan);
	return ret;
}
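
/*
 * Fill in any missing half of the slave configuration from the other
 * half: DMA_MEM_TO_DEV transfers must at least describe the destination
 * side, DMA_DEV_TO_MEM transfers the source side.
 */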
static int sanitize_config(struct dma_slave_config *sconfig)
{
	switch (sconfig->direction) {
	case DMA_MEM_TO_DEV:
		if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->dst_maxburst)
			return -EINVAL;

		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->src_addr_width = sconfig->dst_addr_width;

		if (!sconfig->src_maxburst)
			sconfig->src_maxburst = sconfig->dst_maxburst;

		break;

	case DMA_DEV_TO_MEM:
		if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->src_maxburst)
			return -EINVAL;

		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->dst_addr_width = sconfig->src_addr_width;

		if (!sconfig->dst_maxburst)
			sconfig->dst_maxburst = sconfig->src_maxburst;

		break;
	default:
		return 0;
	}

	return 0;
}
/*
 * Generate a promise, to be used in a normal DMA contract.
 *
 * A NDMA promise contains all the information required to program the
 * normal part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed.
 */
static struct sun4i_dma_promise *
generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig)
{
	struct sun4i_dma_promise *promise;
	int ret;

	ret = sanitize_config(sconfig);
	if (ret)
		return NULL;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	dev_dbg(chan2dev(chan),
		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
		sconfig->src_maxburst, sconfig->dst_maxburst,
		sconfig->src_addr_width, sconfig->dst_addr_width);

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (IS_ERR_VALUE(ret))
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (IS_ERR_VALUE(ret))
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (IS_ERR_VALUE(ret))
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (IS_ERR_VALUE(ret))
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}
/*
 * Generate a promise, to be used in a dedicated DMA contract.
 *
 * A DDMA promise contains all the information required to program the
 * Dedicated part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed.
 */
static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig)
{
	struct sun4i_dma_promise *promise;
	int ret;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (IS_ERR_VALUE(ret))
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (IS_ERR_VALUE(ret))
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (IS_ERR_VALUE(ret))
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (IS_ERR_VALUE(ret))
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}
/*
 * Generate a contract
 *
 * Contracts function as DMA descriptors. As our hardware does not support
 * linked lists, we need to implement SG via software. We use a contract
 * to hold all the pieces of the request and process them serially one
 * after another. Each piece is represented as a promise.
 */
static struct sun4i_dma_contract *generate_dma_contract(void)
{
	struct sun4i_dma_contract *contract;

	contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
	if (!contract)
		return NULL;

	INIT_LIST_HEAD(&contract->demands);
	INIT_LIST_HEAD(&contract->completed_demands);

	return contract;
}
/*
 * Get next promise on a cyclic transfer
 *
 * Cyclic contracts contain a series of promises which are executed on a
 * loop. This function returns the next promise from a cyclic contract,
 * so it can be programmed into the hardware.
 */
static struct sun4i_dma_promise *
get_next_cyclic_promise(struct sun4i_dma_contract *contract)
{
	struct sun4i_dma_promise *promise;

	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (!promise) {
		list_splice_init(&contract->completed_demands,
				 &contract->demands);
		promise = list_first_entry(&contract->demands,
					   struct sun4i_dma_promise, list);
	}

	return promise;
}
/*
 * Free a contract and all its associated promises
 */
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
	struct sun4i_dma_promise *promise, *tmp;

	/*
	 * Free all the demands and completed demands; the _safe iterator
	 * is needed because each entry is freed as the lists are walked
	 */
	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
		kfree(promise);

	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
		kfree(promise);

	kfree(contract);
}
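
/*
 * memcpy is modelled as a contract with a single SDRAM-to-SDRAM
 * promise, using the widest bus width and the largest bursts the
 * engine supports.
 */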
static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	/*
	 * We can only do the copy to bus aligned addresses, so
	 * choose the best one so we get decent performance. We also
	 * maximize the burst size for this same reason.
	 */
	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->src_maxburst = 8;
	sconfig->dst_maxburst = 8;

	if (vchan->is_dedicated)
		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
	else
		promise = generate_ndma_promise(chan, src, dest, len, sconfig);

	if (!promise) {
		kfree(contract);
		return NULL;
	}

	/* Configure memcpy mode */
	if (vchan->is_dedicated) {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
	} else {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
	}

	/* Fill the contract with our only promise */
	list_add_tail(&promise->list, &contract->demands);

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}
static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
			  size_t period_len, enum dma_transfer_direction dir,
			  unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	dma_addr_t src, dest;
	u32 endpoints;
	int nr_periods, offset, plength, i;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	if (vchan->is_dedicated) {
		/*
		 * As we are using this just for audio data, we need to use
		 * normal DMA. There is nothing stopping us from supporting
		 * dedicated DMA here as well, so if a client comes up and
		 * requires it, it will be simple to implement it.
		 */
		dev_err(chan2dev(chan),
			"Cyclic transfers are only supported on Normal DMA\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	contract->is_cyclic = 1;

	/* Figure out the endpoints and the address we need */
	if (dir == DMA_MEM_TO_DEV) {
		src = buf;
		dest = sconfig->dst_addr;
		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
			    SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
	} else {
		src = sconfig->src_addr;
		dest = buf;
		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
			    SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
	}

	/*
	 * We will be using half done interrupts to make two periods
	 * out of a promise, so we need to program the DMA engine less
	 * often: half as often as without this feature (keep in mind
	 * the hardware doesn't support linked lists).
	 *
	 * Say you have a set of periods (| marks the start/end, I for
	 * interrupt, P for programming the engine to do a new
	 * transfer), the easy but slow way would be to do
	 *
	 *  |---|---|---|---|   (periods / promises)
	 *  P  I,P I,P I,P  I
	 *
	 * Using half transfer interrupts you can do
	 *
	 *  |-------|-------|   (promises as configured on hw)
	 *  |---|---|---|---|   (periods)
	 *  P   I   I,P I   I
	 *
	 * Which requires half the engine programming for the same
	 * functionality.
	 */
	nr_periods = DIV_ROUND_UP(len / period_len, 2);
	for (i = 0; i < nr_periods; i++) {
		/* Calculate the offset in the buffer and the length needed */
		offset = i * period_len * 2;
		plength = min((len - offset), (period_len * 2));
		if (dir == DMA_MEM_TO_DEV)
			src = buf + offset;
		else
			dest = buf + offset;

		/* Make the promise */
		promise = generate_ndma_promise(chan, src, dest,
						plength, sconfig);
		if (!promise) {
			/* TODO: should we free everything? */
			return NULL;
		}
		promise->cfg |= endpoints;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}
static struct dma_async_tx_descriptor *
sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	u8 ram_type, io_mode, linear_mode;
	struct scatterlist *sg;
	dma_addr_t srcaddr, dstaddr;
	u32 endpoints, para;
	int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	if (vchan->is_dedicated) {
		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
	} else {
		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
	}

	if (dir == DMA_MEM_TO_DEV)
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
	else
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);

	for_each_sg(sgl, sg, sg_len, i) {
		/* Figure out addresses */
		if (dir == DMA_MEM_TO_DEV) {
			srcaddr = sg_dma_address(sg);
			dstaddr = sconfig->dst_addr;
		} else {
			srcaddr = sconfig->src_addr;
			dstaddr = sg_dma_address(sg);
		}

		/*
		 * These are the magic DMA engine timings that keep SPI going.
		 * I haven't seen any interface on DMAEngine to configure
		 * timings, and so far they seem to work for everything we
		 * support, so I've kept them here. I don't know if other
		 * devices need different timings because, as usual, we only
		 * have the "para" bitfield meanings, but no comment on what
		 * the values should be when doing a certain operation :|
		 */
		para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;

		/* And make a suitable promise */
		if (vchan->is_dedicated)
			promise = generate_ddma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig);
		else
			promise = generate_ndma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig);

		if (!promise)
			return NULL; /* TODO: should we free everything? */

		promise->cfg |= endpoints;
		promise->para = para;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/*
	 * Once we've got all the promises ready, add the contract
	 * to the pending list on the vchan
	 */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}
static int sun4i_dma_terminate_all(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	LIST_HEAD(head);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vchan_get_all_descriptors(&vchan->vc, &head);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	/*
	 * Clearing the configuration register will halt the pchan. Interrupts
	 * may still trigger, so don't forget to disable them.
	 */
	if (pchan) {
		if (pchan->is_dedicated)
			writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
		else
			writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
		set_pchan_interrupt(priv, pchan, 0, 0);
		release_pchan(priv, pchan);
	}

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vchan_dma_desc_free_list(&vchan->vc, &head);
	/* Clear these so the vchan is usable again */
	vchan->processing = NULL;
	vchan->pchan = NULL;
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}
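
/*
 * Just cache the slave configuration on the vchan; it is applied when
 * promises are generated for the next transfer.
 */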
static int sun4i_dma_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	memcpy(&vchan->cfg, config, sizeof(*config));

	return 0;
}
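
/*
 * Translate a two-cell DT DMA specifier into a channel: the first cell
 * selects the DMA type (0 = normal, 1 = dedicated) and the second cell
 * the endpoint (DRQ) number. For illustration only, a hypothetical
 * client node would request a channel like this:
 *
 *	dmas = <&dma 1 27>;
 *	dma-names = "rx";
 */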
static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
	struct sun4i_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 is_dedicated = dma_spec->args[0];
	u8 endpoint = dma_spec->args[1];

	/* Check if type is Normal or Dedicated */
	if (is_dedicated != 0 && is_dedicated != 1)
		return NULL;

	/* Make sure the endpoint looks sane */
	if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
	    (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
		return NULL;

	chan = dma_get_any_slave_channel(&priv->slave);
	if (!chan)
		return NULL;

	/* Assign the endpoint to the vchan */
	vchan = to_sun4i_dma_vchan(chan);
	vchan->is_dedicated = is_dedicated;
	vchan->endpoint = endpoint;

	return chan;
}
static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (!state || (ret == DMA_COMPLETE))
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vd = vchan_find_desc(&vchan->vc, cookie);
	if (!vd)
		goto exit;
	contract = to_sun4i_dma_contract(vd);

	list_for_each_entry(promise, &contract->demands, list)
		bytes += promise->len;

	/*
	 * The hardware is configured to return the remaining byte
	 * quantity. If possible, replace the first listed element's
	 * full size with the actual remaining amount
	 */
	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (promise && pchan) {
		bytes -= promise->len;
		if (pchan->is_dedicated)
			bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		else
			bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
	}

exit:
	dma_set_residue(state, bytes);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return ret;
}
static void sun4i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	/*
	 * If there are pending transactions for this vchan, push one of
	 * them into the engine to get the ball rolling.
	 */
	if (vchan_issue_pending(&vchan->vc))
		__execute_vchan_pending(priv, vchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}
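
/*
 * Interrupt handler. The pending status register holds one half-done
 * bit and one end bit per pchan (bits 2n and 2n + 1 respectively), so
 * the pchan a pending bit belongs to is simply (bit >> 1).
 */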
static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
{
	struct sun4i_dma_dev *priv = dev_id;
	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
	struct sun4i_dma_vchan *vchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	unsigned long pendirq, irqs, disableirqs;
	int bit, i, free_room, allow_mitigation = 1;

	pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

handle_pending:

	disableirqs = 0;
	free_room = 0;

	for_each_set_bit(bit, &pendirq, 32) {
		pchan = &pchans[bit >> 1];
		vchan = pchan->vchan;
		if (!vchan) /* a terminated channel may still interrupt */
			continue;
		contract = vchan->contract;

		/*
		 * Disable the IRQ and free the pchan if it's an end
		 * interrupt (odd bit)
		 */
		if (bit & 1) {
			spin_lock(&vchan->vc.lock);

			/*
			 * Move the promise into the completed list now that
			 * we're done with it
			 */
			list_del(&vchan->processing->list);
			list_add_tail(&vchan->processing->list,
				      &contract->completed_demands);

			/*
			 * Cyclic DMA transfers are special:
			 * - There's always something we can dispatch
			 * - We need to run the callback
			 * - Latency is very important, as this is used by audio
			 * We therefore just cycle through the list and dispatch
			 * whatever we have here, reusing the pchan. There's
			 * no need to run the thread after this.
			 *
			 * For non-cyclic transfers we need to look around,
			 * so we can program some more work, or notify the
			 * client that their transfers have been completed.
			 */
			if (contract->is_cyclic) {
				promise = get_next_cyclic_promise(contract);
				vchan->processing = promise;
				configure_pchan(pchan, promise);
				vchan_cyclic_callback(&contract->vd);
			} else {
				vchan->processing = NULL;
				vchan->pchan = NULL;

				free_room = 1;
				disableirqs |= BIT(bit);
				release_pchan(priv, pchan);
			}

			spin_unlock(&vchan->vc.lock);
		} else {
			/* Half done interrupt */
			if (contract->is_cyclic)
				vchan_cyclic_callback(&contract->vd);
			else
				disableirqs |= BIT(bit);
		}
	}

	/* Disable the IRQs for events we handled */
	spin_lock(&priv->lock);
	irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel_relaxed(irqs & ~disableirqs,
		       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	spin_unlock(&priv->lock);

	/* Writing 1 to the pending field will clear the pending interrupt */
	writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	/*
	 * If a pchan was freed, we may be able to schedule something else,
	 * so have a look around
	 */
	if (free_room) {
		for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
			vchan = &priv->vchans[i];
			spin_lock(&vchan->vc.lock);
			__execute_vchan_pending(priv, vchan);
			spin_unlock(&vchan->vc.lock);
		}
	}
	/*
	 * Handle newer interrupts if some showed up, but only do it once
	 * to avoid looping for too long
	 */
	if (allow_mitigation) {
		pendirq = readl_relaxed(priv->base +
					SUN4I_DMA_IRQ_PENDING_STATUS_REG);
		if (pendirq) {
			allow_mitigation = 0;
			goto handle_pending;
		}
	}
	return IRQ_HANDLED;
}
static int sun4i_dma_probe(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv;
	struct resource *res;
	int i, j, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		return priv->irq;
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(priv->clk);
	}

	platform_set_drvdata(pdev, priv);
	spin_lock_init(&priv->lock);

	dma_cap_zero(priv->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);

	INIT_LIST_HEAD(&priv->slave.channels);
	priv->slave.device_free_chan_resources	= sun4i_dma_free_chan_resources;
	priv->slave.device_tx_status		= sun4i_dma_tx_status;
	priv->slave.device_issue_pending	= sun4i_dma_issue_pending;
	priv->slave.device_prep_slave_sg	= sun4i_dma_prep_slave_sg;
	priv->slave.device_prep_dma_memcpy	= sun4i_dma_prep_dma_memcpy;
	priv->slave.device_prep_dma_cyclic	= sun4i_dma_prep_dma_cyclic;
	priv->slave.device_config		= sun4i_dma_config;
	priv->slave.device_terminate_all	= sun4i_dma_terminate_all;
	priv->slave.copy_align			= 2;
	priv->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.directions = BIT(DMA_DEV_TO_MEM) |
				 BIT(DMA_MEM_TO_DEV);
	priv->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	priv->slave.dev = &pdev->dev;

	priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
	if (!priv->vchans || !priv->pchans)
		return -ENOMEM;

	/*
	 * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
	 * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
	 * dedicated ones
	 */
	for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
		priv->pchans[i].base = priv->base +
			SUN4I_NDMA_CHANNEL_REG_BASE(i);

	for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
		priv->pchans[i].base = priv->base +
			SUN4I_DDMA_CHANNEL_REG_BASE(j);
		priv->pchans[i].is_dedicated = 1;
	}

	for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
		struct sun4i_dma_vchan *vchan = &priv->vchans[i];

		spin_lock_init(&vchan->vc.lock);
		vchan->vc.desc_free = sun4i_dma_free_contract;
		vchan_init(&vchan->vc, &priv->slave);
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		return ret;
	}

	/*
	 * Make sure the IRQs are all disabled and accounted for. The bootloader
	 * likes to leave these dirty
	 */
	writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
			       0, dev_name(&pdev->dev), priv);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&priv->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
					 priv);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&priv->slave);
err_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}
static int sun4i_dma_remove(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);

	/* Disable IRQ so no more work is scheduled */
	disable_irq(priv->irq);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&priv->slave);

	clk_disable_unprepare(priv->clk);

	return 0;
}
static const struct of_device_id sun4i_dma_match[] = {
	{ .compatible = "allwinner,sun4i-a10-dma" },
	{ /* sentinel */ },
};
static struct platform_driver sun4i_dma_driver = {
	.probe	= sun4i_dma_probe,
	.remove	= sun4i_dma_remove,
	.driver	= {
		.name		= "sun4i-dma",
		.of_match_table	= sun4i_dma_match,
	},
};
module_platform_driver(sun4i_dma_driver);

MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
MODULE_LICENSE("GPL");