rcar-dmac.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Renesas R-Car Gen2 DMA Controller Driver
  4. *
  5. * Copyright (C) 2014 Renesas Electronics Inc.
  6. *
  7. * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  8. */
  9. #include <linux/delay.h>
  10. #include <linux/dma-mapping.h>
  11. #include <linux/dmaengine.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/list.h>
  14. #include <linux/module.h>
  15. #include <linux/mutex.h>
  16. #include <linux/of.h>
  17. #include <linux/of_dma.h>
  18. #include <linux/of_platform.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/slab.h>
  22. #include <linux/spinlock.h>
  23. #include "../dmaengine.h"
  24. /*
  25. * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
  26. * @node: entry in the parent's chunks list
  27. * @src_addr: device source address
  28. * @dst_addr: device destination address
  29. * @size: transfer size in bytes
  30. */
  31. struct rcar_dmac_xfer_chunk {
  32. struct list_head node;
  33. dma_addr_t src_addr;
  34. dma_addr_t dst_addr;
  35. u32 size;
  36. };
  37. /*
  38. * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
  39. * @sar: value of the SAR register (source address)
  40. * @dar: value of the DAR register (destination address)
  41. * @tcr: value of the TCR register (transfer count)
  42. */
  43. struct rcar_dmac_hw_desc {
  44. u32 sar;
  45. u32 dar;
  46. u32 tcr;
  47. u32 reserved;
  48. } __attribute__((__packed__));
  49. /*
  50. * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
  51. * @async_tx: base DMA asynchronous transaction descriptor
  52. * @direction: direction of the DMA transfer
  53. * @xfer_shift: log2 of the transfer size
  54. * @chcr: value of the channel configuration register for this transfer
  55. * @node: entry in the channel's descriptors lists
  56. * @chunks: list of transfer chunks for this transfer
  57. * @running: the transfer chunk being currently processed
  58. * @nchunks: number of transfer chunks for this transfer
  59. * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
  60. * @hwdescs.mem: hardware descriptors memory for the transfer
  61. * @hwdescs.dma: device address of the hardware descriptors memory
  62. * @hwdescs.size: size of the hardware descriptors in bytes
  63. * @size: transfer size in bytes
  64. * @cyclic: when set indicates that the DMA transfer is cyclic
  65. */
  66. struct rcar_dmac_desc {
  67. struct dma_async_tx_descriptor async_tx;
  68. enum dma_transfer_direction direction;
  69. unsigned int xfer_shift;
  70. u32 chcr;
  71. struct list_head node;
  72. struct list_head chunks;
  73. struct rcar_dmac_xfer_chunk *running;
  74. unsigned int nchunks;
  75. struct {
  76. bool use;
  77. struct rcar_dmac_hw_desc *mem;
  78. dma_addr_t dma;
  79. size_t size;
  80. } hwdescs;
  81. unsigned int size;
  82. bool cyclic;
  83. };
  84. #define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
  85. /*
  86. * struct rcar_dmac_desc_page - One page worth of descriptors
  87. * @node: entry in the channel's pages list
  88. * @descs: array of DMA descriptors
  89. * @chunks: array of transfer chunk descriptors
  90. */
  91. struct rcar_dmac_desc_page {
  92. struct list_head node;
  93. union {
  94. struct rcar_dmac_desc descs[0];
  95. struct rcar_dmac_xfer_chunk chunks[0];
  96. };
  97. };
  98. #define RCAR_DMAC_DESCS_PER_PAGE \
  99. ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
  100. sizeof(struct rcar_dmac_desc))
  101. #define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \
  102. ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
  103. sizeof(struct rcar_dmac_xfer_chunk))
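/*
 * Worked example (illustrative only, not from the original source): on a
 * hypothetical 64-bit build where offsetof(struct rcar_dmac_desc_page, descs)
 * is 16 bytes and sizeof(struct rcar_dmac_desc) comes to 136 bytes, a 4 KiB
 * page would hold (4096 - 16) / 136 = 30 descriptors. The exact counts depend
 * on the architecture and kernel configuration; the two macros above compute
 * them at build time.
 */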
  104. /*
  105. * struct rcar_dmac_chan_slave - Slave configuration
  106. * @slave_addr: slave memory address
  107. * @xfer_size: size (in bytes) of hardware transfers
  108. */
  109. struct rcar_dmac_chan_slave {
  110. phys_addr_t slave_addr;
  111. unsigned int xfer_size;
  112. };
  113. /*
  114. * struct rcar_dmac_chan_map - Map of slave device phys to dma address
  115. * @addr: slave dma address
  116. * @dir: direction of mapping
  117. * @slave: slave configuration that is mapped
  118. */
  119. struct rcar_dmac_chan_map {
  120. dma_addr_t addr;
  121. enum dma_data_direction dir;
  122. struct rcar_dmac_chan_slave slave;
  123. };
  124. /*
  125. * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
  126. * @chan: base DMA channel object
  127. * @iomem: channel I/O memory base
  128. * @index: index of this channel in the controller
  129. * @irq: channel IRQ
  130. * @src: slave memory address and size on the source side
  131. * @dst: slave memory address and size on the destination side
  132. * @mid_rid: hardware MID/RID for the DMA client using this channel
  133. * @lock: protects the channel CHCR register and the desc members
  134. * @desc.free: list of free descriptors
  135. * @desc.pending: list of pending descriptors (submitted with tx_submit)
  136. * @desc.active: list of active descriptors (activated with issue_pending)
  137. * @desc.done: list of completed descriptors
  138. * @desc.wait: list of descriptors waiting for an ack
  139. * @desc.running: the descriptor being processed (a member of the active list)
  140. * @desc.chunks_free: list of free transfer chunk descriptors
  141. * @desc.pages: list of pages used by allocated descriptors
  142. */
  143. struct rcar_dmac_chan {
  144. struct dma_chan chan;
  145. void __iomem *iomem;
  146. unsigned int index;
  147. int irq;
  148. struct rcar_dmac_chan_slave src;
  149. struct rcar_dmac_chan_slave dst;
  150. struct rcar_dmac_chan_map map;
  151. int mid_rid;
  152. spinlock_t lock;
  153. struct {
  154. struct list_head free;
  155. struct list_head pending;
  156. struct list_head active;
  157. struct list_head done;
  158. struct list_head wait;
  159. struct rcar_dmac_desc *running;
  160. struct list_head chunks_free;
  161. struct list_head pages;
  162. } desc;
  163. };
  164. #define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan)
  165. /*
  166. * struct rcar_dmac - R-Car Gen2 DMA Controller
  167. * @engine: base DMA engine object
  168. * @dev: the hardware device
  169. * @iomem: remapped I/O memory base
  170. * @n_channels: number of available channels
  171. * @channels: array of DMAC channels
  172. * @modules: bitmask of client modules in use
  173. */
  174. struct rcar_dmac {
  175. struct dma_device engine;
  176. struct device *dev;
  177. void __iomem *iomem;
  178. struct device_dma_parameters parms;
  179. unsigned int n_channels;
  180. struct rcar_dmac_chan *channels;
  181. DECLARE_BITMAP(modules, 256);
  182. };
  183. #define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
  184. /* -----------------------------------------------------------------------------
  185. * Registers
  186. */
  187. #define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i))
  188. #define RCAR_DMAISTA 0x0020
  189. #define RCAR_DMASEC 0x0030
  190. #define RCAR_DMAOR 0x0060
  191. #define RCAR_DMAOR_PRI_FIXED (0 << 8)
  192. #define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
  193. #define RCAR_DMAOR_AE (1 << 2)
  194. #define RCAR_DMAOR_DME (1 << 0)
  195. #define RCAR_DMACHCLR 0x0080
  196. #define RCAR_DMADPSEC 0x00a0
  197. #define RCAR_DMASAR 0x0000
  198. #define RCAR_DMADAR 0x0004
  199. #define RCAR_DMATCR 0x0008
  200. #define RCAR_DMATCR_MASK 0x00ffffff
  201. #define RCAR_DMATSR 0x0028
  202. #define RCAR_DMACHCR 0x000c
  203. #define RCAR_DMACHCR_CAE (1 << 31)
  204. #define RCAR_DMACHCR_CAIE (1 << 30)
  205. #define RCAR_DMACHCR_DPM_DISABLED (0 << 28)
  206. #define RCAR_DMACHCR_DPM_ENABLED (1 << 28)
  207. #define RCAR_DMACHCR_DPM_REPEAT (2 << 28)
  208. #define RCAR_DMACHCR_DPM_INFINITE (3 << 28)
  209. #define RCAR_DMACHCR_RPT_SAR (1 << 27)
  210. #define RCAR_DMACHCR_RPT_DAR (1 << 26)
  211. #define RCAR_DMACHCR_RPT_TCR (1 << 25)
  212. #define RCAR_DMACHCR_DPB (1 << 22)
  213. #define RCAR_DMACHCR_DSE (1 << 19)
  214. #define RCAR_DMACHCR_DSIE (1 << 18)
  215. #define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3))
  216. #define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3))
  217. #define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3))
  218. #define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3))
  219. #define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3))
  220. #define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3))
  221. #define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3))
  222. #define RCAR_DMACHCR_DM_FIXED (0 << 14)
  223. #define RCAR_DMACHCR_DM_INC (1 << 14)
  224. #define RCAR_DMACHCR_DM_DEC (2 << 14)
  225. #define RCAR_DMACHCR_SM_FIXED (0 << 12)
  226. #define RCAR_DMACHCR_SM_INC (1 << 12)
  227. #define RCAR_DMACHCR_SM_DEC (2 << 12)
  228. #define RCAR_DMACHCR_RS_AUTO (4 << 8)
  229. #define RCAR_DMACHCR_RS_DMARS (8 << 8)
  230. #define RCAR_DMACHCR_IE (1 << 2)
  231. #define RCAR_DMACHCR_TE (1 << 1)
  232. #define RCAR_DMACHCR_DE (1 << 0)
  233. #define RCAR_DMATCRB 0x0018
  234. #define RCAR_DMATSRB 0x0038
  235. #define RCAR_DMACHCRB 0x001c
  236. #define RCAR_DMACHCRB_DCNT(n) ((n) << 24)
  237. #define RCAR_DMACHCRB_DPTR_MASK (0xff << 16)
  238. #define RCAR_DMACHCRB_DPTR_SHIFT 16
  239. #define RCAR_DMACHCRB_DRST (1 << 15)
  240. #define RCAR_DMACHCRB_DTS (1 << 8)
  241. #define RCAR_DMACHCRB_SLM_NORMAL (0 << 4)
  242. #define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4)
  243. #define RCAR_DMACHCRB_PRI(n) ((n) << 0)
  244. #define RCAR_DMARS 0x0040
  245. #define RCAR_DMABUFCR 0x0048
  246. #define RCAR_DMABUFCR_MBU(n) ((n) << 16)
  247. #define RCAR_DMABUFCR_ULB(n) ((n) << 0)
  248. #define RCAR_DMADPBASE 0x0050
  249. #define RCAR_DMADPBASE_MASK 0xfffffff0
  250. #define RCAR_DMADPBASE_SEL (1 << 0)
  251. #define RCAR_DMADPCR 0x0054
  252. #define RCAR_DMADPCR_DIPT(n) ((n) << 24)
  253. #define RCAR_DMAFIXSAR 0x0010
  254. #define RCAR_DMAFIXDAR 0x0014
  255. #define RCAR_DMAFIXDPBASE 0x0060
  256. /* Hardcode the MEMCPY transfer size to 4 bytes. */
  257. #define RCAR_DMAC_MEMCPY_XFER_SIZE 4
  258. /* -----------------------------------------------------------------------------
  259. * Device access
  260. */
  261. static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
  262. {
  263. if (reg == RCAR_DMAOR)
  264. writew(data, dmac->iomem + reg);
  265. else
  266. writel(data, dmac->iomem + reg);
  267. }
  268. static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
  269. {
  270. if (reg == RCAR_DMAOR)
  271. return readw(dmac->iomem + reg);
  272. else
  273. return readl(dmac->iomem + reg);
  274. }
  275. static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
  276. {
  277. if (reg == RCAR_DMARS)
  278. return readw(chan->iomem + reg);
  279. else
  280. return readl(chan->iomem + reg);
  281. }
  282. static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
  283. {
  284. if (reg == RCAR_DMARS)
  285. writew(data, chan->iomem + reg);
  286. else
  287. writel(data, chan->iomem + reg);
  288. }
  289. /* -----------------------------------------------------------------------------
  290. * Initialization and configuration
  291. */
  292. static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
  293. {
  294. u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
  295. return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
  296. }
  297. static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
  298. {
  299. struct rcar_dmac_desc *desc = chan->desc.running;
  300. u32 chcr = desc->chcr;
  301. WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
  302. if (chan->mid_rid >= 0)
  303. rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
  304. if (desc->hwdescs.use) {
  305. struct rcar_dmac_xfer_chunk *chunk =
  306. list_first_entry(&desc->chunks,
  307. struct rcar_dmac_xfer_chunk, node);
  308. dev_dbg(chan->chan.device->dev,
  309. "chan%u: queue desc %p: %u@%pad\n",
  310. chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
  311. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  312. rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
  313. chunk->src_addr >> 32);
  314. rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
  315. chunk->dst_addr >> 32);
  316. rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
  317. desc->hwdescs.dma >> 32);
  318. #endif
  319. rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
  320. (desc->hwdescs.dma & 0xfffffff0) |
  321. RCAR_DMADPBASE_SEL);
  322. rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
  323. RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
  324. RCAR_DMACHCRB_DRST);
  325. /*
  326. * Errata: When descriptor memory is accessed through an IOMMU
  327. * the DMADAR register isn't initialized automatically from the
   328. * first descriptor at the beginning of the transfer by the DMAC as it
   329. * should. Initialize it manually with the destination address
  330. * of the first chunk.
  331. */
  332. rcar_dmac_chan_write(chan, RCAR_DMADAR,
  333. chunk->dst_addr & 0xffffffff);
  334. /*
  335. * Program the descriptor stage interrupt to occur after the end
  336. * of the first stage.
  337. */
  338. rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));
  339. chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
  340. | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;
  341. /*
  342. * If the descriptor isn't cyclic enable normal descriptor mode
  343. * and the transfer completion interrupt.
  344. */
  345. if (!desc->cyclic)
  346. chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
  347. /*
  348. * If the descriptor is cyclic and has a callback enable the
  349. * descriptor stage interrupt in infinite repeat mode.
  350. */
  351. else if (desc->async_tx.callback)
  352. chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
  353. /*
  354. * Otherwise just select infinite repeat mode without any
  355. * interrupt.
  356. */
  357. else
  358. chcr |= RCAR_DMACHCR_DPM_INFINITE;
  359. } else {
  360. struct rcar_dmac_xfer_chunk *chunk = desc->running;
  361. dev_dbg(chan->chan.device->dev,
  362. "chan%u: queue chunk %p: %u@%pad -> %pad\n",
  363. chan->index, chunk, chunk->size, &chunk->src_addr,
  364. &chunk->dst_addr);
  365. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  366. rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
  367. chunk->src_addr >> 32);
  368. rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
  369. chunk->dst_addr >> 32);
  370. #endif
  371. rcar_dmac_chan_write(chan, RCAR_DMASAR,
  372. chunk->src_addr & 0xffffffff);
  373. rcar_dmac_chan_write(chan, RCAR_DMADAR,
  374. chunk->dst_addr & 0xffffffff);
  375. rcar_dmac_chan_write(chan, RCAR_DMATCR,
  376. chunk->size >> desc->xfer_shift);
  377. chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
  378. }
  379. rcar_dmac_chan_write(chan, RCAR_DMACHCR,
  380. chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
  381. }
  382. static int rcar_dmac_init(struct rcar_dmac *dmac)
  383. {
  384. u16 dmaor;
  385. /* Clear all channels and enable the DMAC globally. */
  386. rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
  387. rcar_dmac_write(dmac, RCAR_DMAOR,
  388. RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
  389. dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
  390. if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
  391. dev_warn(dmac->dev, "DMAOR initialization failed.\n");
  392. return -EIO;
  393. }
  394. return 0;
  395. }
  396. /* -----------------------------------------------------------------------------
  397. * Descriptors submission
  398. */
  399. static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
  400. {
  401. struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
  402. struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
  403. unsigned long flags;
  404. dma_cookie_t cookie;
  405. spin_lock_irqsave(&chan->lock, flags);
  406. cookie = dma_cookie_assign(tx);
  407. dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
  408. chan->index, tx->cookie, desc);
  409. list_add_tail(&desc->node, &chan->desc.pending);
  410. desc->running = list_first_entry(&desc->chunks,
  411. struct rcar_dmac_xfer_chunk, node);
  412. spin_unlock_irqrestore(&chan->lock, flags);
  413. return cookie;
  414. }
  415. /* -----------------------------------------------------------------------------
  416. * Descriptors allocation and free
  417. */
  418. /*
  419. * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
  420. * @chan: the DMA channel
  421. * @gfp: allocation flags
  422. */
  423. static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
  424. {
  425. struct rcar_dmac_desc_page *page;
  426. unsigned long flags;
  427. LIST_HEAD(list);
  428. unsigned int i;
  429. page = (void *)get_zeroed_page(gfp);
  430. if (!page)
  431. return -ENOMEM;
  432. for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
  433. struct rcar_dmac_desc *desc = &page->descs[i];
  434. dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
  435. desc->async_tx.tx_submit = rcar_dmac_tx_submit;
  436. INIT_LIST_HEAD(&desc->chunks);
  437. list_add_tail(&desc->node, &list);
  438. }
  439. spin_lock_irqsave(&chan->lock, flags);
  440. list_splice_tail(&list, &chan->desc.free);
  441. list_add_tail(&page->node, &chan->desc.pages);
  442. spin_unlock_irqrestore(&chan->lock, flags);
  443. return 0;
  444. }
  445. /*
  446. * rcar_dmac_desc_put - Release a DMA transfer descriptor
  447. * @chan: the DMA channel
  448. * @desc: the descriptor
  449. *
  450. * Put the descriptor and its transfer chunk descriptors back in the channel's
  451. * free descriptors lists. The descriptor's chunks list will be reinitialized to
  452. * an empty list as a result.
  453. *
  454. * The descriptor must have been removed from the channel's lists before calling
  455. * this function.
  456. */
  457. static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
  458. struct rcar_dmac_desc *desc)
  459. {
  460. unsigned long flags;
  461. spin_lock_irqsave(&chan->lock, flags);
  462. list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
  463. list_add(&desc->node, &chan->desc.free);
  464. spin_unlock_irqrestore(&chan->lock, flags);
  465. }
  466. static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
  467. {
  468. struct rcar_dmac_desc *desc, *_desc;
  469. unsigned long flags;
  470. LIST_HEAD(list);
  471. /*
  472. * We have to temporarily move all descriptors from the wait list to a
  473. * local list as iterating over the wait list, even with
  474. * list_for_each_entry_safe, isn't safe if we release the channel lock
  475. * around the rcar_dmac_desc_put() call.
  476. */
  477. spin_lock_irqsave(&chan->lock, flags);
  478. list_splice_init(&chan->desc.wait, &list);
  479. spin_unlock_irqrestore(&chan->lock, flags);
  480. list_for_each_entry_safe(desc, _desc, &list, node) {
  481. if (async_tx_test_ack(&desc->async_tx)) {
  482. list_del(&desc->node);
  483. rcar_dmac_desc_put(chan, desc);
  484. }
  485. }
  486. if (list_empty(&list))
  487. return;
  488. /* Put the remaining descriptors back in the wait list. */
  489. spin_lock_irqsave(&chan->lock, flags);
  490. list_splice(&list, &chan->desc.wait);
  491. spin_unlock_irqrestore(&chan->lock, flags);
  492. }
  493. /*
  494. * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
  495. * @chan: the DMA channel
  496. *
  497. * Locking: This function must be called in a non-atomic context.
  498. *
  499. * Return: A pointer to the allocated descriptor or NULL if no descriptor can
  500. * be allocated.
  501. */
  502. static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
  503. {
  504. struct rcar_dmac_desc *desc;
  505. unsigned long flags;
  506. int ret;
  507. /* Recycle acked descriptors before attempting allocation. */
  508. rcar_dmac_desc_recycle_acked(chan);
  509. spin_lock_irqsave(&chan->lock, flags);
  510. while (list_empty(&chan->desc.free)) {
  511. /*
  512. * No free descriptors, allocate a page worth of them and try
  513. * again, as someone else could race us to get the newly
  514. * allocated descriptors. If the allocation fails return an
  515. * error.
  516. */
  517. spin_unlock_irqrestore(&chan->lock, flags);
  518. ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
  519. if (ret < 0)
  520. return NULL;
  521. spin_lock_irqsave(&chan->lock, flags);
  522. }
  523. desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
  524. list_del(&desc->node);
  525. spin_unlock_irqrestore(&chan->lock, flags);
  526. return desc;
  527. }
  528. /*
  529. * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
  530. * @chan: the DMA channel
  531. * @gfp: allocation flags
  532. */
  533. static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
  534. {
  535. struct rcar_dmac_desc_page *page;
  536. unsigned long flags;
  537. LIST_HEAD(list);
  538. unsigned int i;
  539. page = (void *)get_zeroed_page(gfp);
  540. if (!page)
  541. return -ENOMEM;
  542. for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
  543. struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
  544. list_add_tail(&chunk->node, &list);
  545. }
  546. spin_lock_irqsave(&chan->lock, flags);
  547. list_splice_tail(&list, &chan->desc.chunks_free);
  548. list_add_tail(&page->node, &chan->desc.pages);
  549. spin_unlock_irqrestore(&chan->lock, flags);
  550. return 0;
  551. }
  552. /*
  553. * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
  554. * @chan: the DMA channel
  555. *
  556. * Locking: This function must be called in a non-atomic context.
  557. *
  558. * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
  559. * descriptor can be allocated.
  560. */
  561. static struct rcar_dmac_xfer_chunk *
  562. rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
  563. {
  564. struct rcar_dmac_xfer_chunk *chunk;
  565. unsigned long flags;
  566. int ret;
  567. spin_lock_irqsave(&chan->lock, flags);
  568. while (list_empty(&chan->desc.chunks_free)) {
  569. /*
  570. * No free descriptors, allocate a page worth of them and try
  571. * again, as someone else could race us to get the newly
  572. * allocated descriptors. If the allocation fails return an
  573. * error.
  574. */
  575. spin_unlock_irqrestore(&chan->lock, flags);
  576. ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
  577. if (ret < 0)
  578. return NULL;
  579. spin_lock_irqsave(&chan->lock, flags);
  580. }
  581. chunk = list_first_entry(&chan->desc.chunks_free,
  582. struct rcar_dmac_xfer_chunk, node);
  583. list_del(&chunk->node);
  584. spin_unlock_irqrestore(&chan->lock, flags);
  585. return chunk;
  586. }
  587. static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
  588. struct rcar_dmac_desc *desc, size_t size)
  589. {
  590. /*
  591. * dma_alloc_coherent() allocates memory in page size increments. To
  592. * avoid reallocating the hardware descriptors when the allocated size
   593. * wouldn't change, align the requested size to a multiple of the page
  594. * size.
  595. */
  596. size = PAGE_ALIGN(size);
  597. if (desc->hwdescs.size == size)
  598. return;
  599. if (desc->hwdescs.mem) {
  600. dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
  601. desc->hwdescs.mem, desc->hwdescs.dma);
  602. desc->hwdescs.mem = NULL;
  603. desc->hwdescs.size = 0;
  604. }
  605. if (!size)
  606. return;
  607. desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
  608. &desc->hwdescs.dma, GFP_NOWAIT);
  609. if (!desc->hwdescs.mem)
  610. return;
  611. desc->hwdescs.size = size;
  612. }
  613. static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
  614. struct rcar_dmac_desc *desc)
  615. {
  616. struct rcar_dmac_xfer_chunk *chunk;
  617. struct rcar_dmac_hw_desc *hwdesc;
  618. rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));
  619. hwdesc = desc->hwdescs.mem;
  620. if (!hwdesc)
  621. return -ENOMEM;
  622. list_for_each_entry(chunk, &desc->chunks, node) {
  623. hwdesc->sar = chunk->src_addr;
  624. hwdesc->dar = chunk->dst_addr;
  625. hwdesc->tcr = chunk->size >> desc->xfer_shift;
  626. hwdesc++;
  627. }
  628. return 0;
  629. }
  630. /* -----------------------------------------------------------------------------
  631. * Stop and reset
  632. */
  633. static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
  634. {
  635. u32 chcr;
  636. unsigned int i;
  637. /*
   638. * Ensure that the DE bit actually reads back as 0 after
  639. * clearing it.
  640. */
  641. for (i = 0; i < 1024; i++) {
  642. chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
  643. if (!(chcr & RCAR_DMACHCR_DE))
  644. return;
  645. udelay(1);
  646. }
  647. dev_err(chan->chan.device->dev, "CHCR DE check error\n");
  648. }
  649. static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan)
  650. {
  651. u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
  652. /* set DE=0 and flush remaining data */
  653. rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));
  654. /* make sure all remaining data was flushed */
  655. rcar_dmac_chcr_de_barrier(chan);
  656. }
  657. static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
  658. {
  659. u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
  660. chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
  661. RCAR_DMACHCR_TE | RCAR_DMACHCR_DE |
  662. RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE);
  663. rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
  664. rcar_dmac_chcr_de_barrier(chan);
  665. }
  666. static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
  667. {
  668. struct rcar_dmac_desc *desc, *_desc;
  669. unsigned long flags;
  670. LIST_HEAD(descs);
  671. spin_lock_irqsave(&chan->lock, flags);
  672. /* Move all non-free descriptors to the local lists. */
  673. list_splice_init(&chan->desc.pending, &descs);
  674. list_splice_init(&chan->desc.active, &descs);
  675. list_splice_init(&chan->desc.done, &descs);
  676. list_splice_init(&chan->desc.wait, &descs);
  677. chan->desc.running = NULL;
  678. spin_unlock_irqrestore(&chan->lock, flags);
  679. list_for_each_entry_safe(desc, _desc, &descs, node) {
  680. list_del(&desc->node);
  681. rcar_dmac_desc_put(chan, desc);
  682. }
  683. }
  684. static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
  685. {
  686. unsigned int i;
  687. /* Stop all channels. */
  688. for (i = 0; i < dmac->n_channels; ++i) {
  689. struct rcar_dmac_chan *chan = &dmac->channels[i];
  690. /* Stop and reinitialize the channel. */
  691. spin_lock_irq(&chan->lock);
  692. rcar_dmac_chan_halt(chan);
  693. spin_unlock_irq(&chan->lock);
  694. }
  695. }
  696. static int rcar_dmac_chan_pause(struct dma_chan *chan)
  697. {
  698. unsigned long flags;
  699. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  700. spin_lock_irqsave(&rchan->lock, flags);
  701. rcar_dmac_clear_chcr_de(rchan);
  702. spin_unlock_irqrestore(&rchan->lock, flags);
  703. return 0;
  704. }
  705. /* -----------------------------------------------------------------------------
  706. * Descriptors preparation
  707. */
  708. static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
  709. struct rcar_dmac_desc *desc)
  710. {
  711. static const u32 chcr_ts[] = {
  712. RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
  713. RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
  714. RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
  715. RCAR_DMACHCR_TS_64B,
  716. };
  717. unsigned int xfer_size;
  718. u32 chcr;
  719. switch (desc->direction) {
  720. case DMA_DEV_TO_MEM:
  721. chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
  722. | RCAR_DMACHCR_RS_DMARS;
  723. xfer_size = chan->src.xfer_size;
  724. break;
  725. case DMA_MEM_TO_DEV:
  726. chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
  727. | RCAR_DMACHCR_RS_DMARS;
  728. xfer_size = chan->dst.xfer_size;
  729. break;
  730. case DMA_MEM_TO_MEM:
  731. default:
  732. chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
  733. | RCAR_DMACHCR_RS_AUTO;
  734. xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
  735. break;
  736. }
  737. desc->xfer_shift = ilog2(xfer_size);
  738. desc->chcr = chcr | chcr_ts[desc->xfer_shift];
  739. }
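/*
 * Worked example (informational note, not in the original source): a MEMCPY
 * descriptor uses xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE = 4, so
 * desc->xfer_shift = ilog2(4) = 2 and chcr_ts[2] selects RCAR_DMACHCR_TS_4B,
 * i.e. 32-bit hardware transfers. A slave channel configured with a 2-byte
 * bus width would end up with xfer_shift = 1 and RCAR_DMACHCR_TS_2B instead.
 */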
  740. /*
  741. * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
  742. *
  743. * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
  744. * converted to scatter-gather to guarantee consistent locking and a correct
   745. * list manipulation. For slave DMA, direction carries the usual meaning and,
   746. * logically, the SG list is RAM while the addr variable contains the slave
   747. * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
   748. * DMA_MEM_TO_MEM and the SG list contains a single element pointing at the source buffer.
  749. */
  750. static struct dma_async_tx_descriptor *
  751. rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
  752. unsigned int sg_len, dma_addr_t dev_addr,
  753. enum dma_transfer_direction dir, unsigned long dma_flags,
  754. bool cyclic)
  755. {
  756. struct rcar_dmac_xfer_chunk *chunk;
  757. struct rcar_dmac_desc *desc;
  758. struct scatterlist *sg;
  759. unsigned int nchunks = 0;
  760. unsigned int max_chunk_size;
  761. unsigned int full_size = 0;
  762. bool cross_boundary = false;
  763. unsigned int i;
  764. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  765. u32 high_dev_addr;
  766. u32 high_mem_addr;
  767. #endif
  768. desc = rcar_dmac_desc_get(chan);
  769. if (!desc)
  770. return NULL;
  771. desc->async_tx.flags = dma_flags;
  772. desc->async_tx.cookie = -EBUSY;
  773. desc->cyclic = cyclic;
  774. desc->direction = dir;
  775. rcar_dmac_chan_configure_desc(chan, desc);
  776. max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
  777. /*
  778. * Allocate and fill the transfer chunk descriptors. We own the only
  779. * reference to the DMA descriptor, there's no need for locking.
  780. */
  781. for_each_sg(sgl, sg, sg_len, i) {
  782. dma_addr_t mem_addr = sg_dma_address(sg);
  783. unsigned int len = sg_dma_len(sg);
  784. full_size += len;
  785. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  786. if (i == 0) {
  787. high_dev_addr = dev_addr >> 32;
  788. high_mem_addr = mem_addr >> 32;
  789. }
  790. if ((dev_addr >> 32 != high_dev_addr) ||
  791. (mem_addr >> 32 != high_mem_addr))
  792. cross_boundary = true;
  793. #endif
  794. while (len) {
  795. unsigned int size = min(len, max_chunk_size);
  796. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  797. /*
  798. * Prevent individual transfers from crossing 4GB
  799. * boundaries.
  800. */
  801. if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
  802. size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
  803. cross_boundary = true;
  804. }
  805. if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
  806. size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
  807. cross_boundary = true;
  808. }
  809. #endif
  810. chunk = rcar_dmac_xfer_chunk_get(chan);
  811. if (!chunk) {
  812. rcar_dmac_desc_put(chan, desc);
  813. return NULL;
  814. }
  815. if (dir == DMA_DEV_TO_MEM) {
  816. chunk->src_addr = dev_addr;
  817. chunk->dst_addr = mem_addr;
  818. } else {
  819. chunk->src_addr = mem_addr;
  820. chunk->dst_addr = dev_addr;
  821. }
  822. chunk->size = size;
  823. dev_dbg(chan->chan.device->dev,
  824. "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
  825. chan->index, chunk, desc, i, sg, size, len,
  826. &chunk->src_addr, &chunk->dst_addr);
  827. mem_addr += size;
  828. if (dir == DMA_MEM_TO_MEM)
  829. dev_addr += size;
  830. len -= size;
  831. list_add_tail(&chunk->node, &desc->chunks);
  832. nchunks++;
  833. }
  834. }
  835. desc->nchunks = nchunks;
  836. desc->size = full_size;
  837. /*
  838. * Use hardware descriptor lists if possible when more than one chunk
  839. * needs to be transferred (otherwise they don't make much sense).
  840. *
   841. * The source and destination addresses must be located in the same 4 GiB
   842. * region of the 40-bit address space when hardware descriptors are used;
   843. * cross_boundary tracks whether that constraint is violated.
  844. */
  845. desc->hwdescs.use = !cross_boundary && nchunks > 1;
  846. if (desc->hwdescs.use) {
  847. if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
  848. desc->hwdescs.use = false;
  849. }
  850. return &desc->async_tx;
  851. }
  852. /* -----------------------------------------------------------------------------
  853. * DMA engine operations
  854. */
  855. static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
  856. {
  857. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  858. int ret;
  859. INIT_LIST_HEAD(&rchan->desc.chunks_free);
  860. INIT_LIST_HEAD(&rchan->desc.pages);
  861. /* Preallocate descriptors. */
  862. ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
  863. if (ret < 0)
  864. return -ENOMEM;
  865. ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
  866. if (ret < 0)
  867. return -ENOMEM;
  868. return pm_runtime_get_sync(chan->device->dev);
  869. }
  870. static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
  871. {
  872. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  873. struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
  874. struct rcar_dmac_chan_map *map = &rchan->map;
  875. struct rcar_dmac_desc_page *page, *_page;
  876. struct rcar_dmac_desc *desc;
  877. LIST_HEAD(list);
  878. /* Protect against ISR */
  879. spin_lock_irq(&rchan->lock);
  880. rcar_dmac_chan_halt(rchan);
  881. spin_unlock_irq(&rchan->lock);
  882. /*
  883. * Now no new interrupts will occur, but one might already be
  884. * running. Wait for it to finish before freeing resources.
  885. */
  886. synchronize_irq(rchan->irq);
  887. if (rchan->mid_rid >= 0) {
  888. /* The caller is holding dma_list_mutex */
  889. clear_bit(rchan->mid_rid, dmac->modules);
  890. rchan->mid_rid = -EINVAL;
  891. }
  892. list_splice_init(&rchan->desc.free, &list);
  893. list_splice_init(&rchan->desc.pending, &list);
  894. list_splice_init(&rchan->desc.active, &list);
  895. list_splice_init(&rchan->desc.done, &list);
  896. list_splice_init(&rchan->desc.wait, &list);
  897. rchan->desc.running = NULL;
  898. list_for_each_entry(desc, &list, node)
  899. rcar_dmac_realloc_hwdesc(rchan, desc, 0);
  900. list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
  901. list_del(&page->node);
  902. free_page((unsigned long)page);
  903. }
  904. /* Remove slave mapping if present. */
  905. if (map->slave.xfer_size) {
  906. dma_unmap_resource(chan->device->dev, map->addr,
  907. map->slave.xfer_size, map->dir, 0);
  908. map->slave.xfer_size = 0;
  909. }
  910. pm_runtime_put(chan->device->dev);
  911. }
  912. static struct dma_async_tx_descriptor *
  913. rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
  914. dma_addr_t dma_src, size_t len, unsigned long flags)
  915. {
  916. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  917. struct scatterlist sgl;
  918. if (!len)
  919. return NULL;
  920. sg_init_table(&sgl, 1);
  921. sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
  922. offset_in_page(dma_src));
  923. sg_dma_address(&sgl) = dma_src;
  924. sg_dma_len(&sgl) = len;
  925. return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
  926. DMA_MEM_TO_MEM, flags, false);
  927. }
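/*
 * Illustrative client-side sketch (not part of this driver): how a memcpy
 * transfer might be requested through the generic dmaengine API. The function
 * name, buffers and error handling are hypothetical placeholders.
 */
#if 0
static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);	/* any MEMCPY channel */
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* ... wait for completion, then dma_release_channel(chan) ... */
	return dma_submit_error(cookie) ? -EIO : 0;
}
#endif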
  928. static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
  929. enum dma_transfer_direction dir)
  930. {
  931. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  932. struct rcar_dmac_chan_map *map = &rchan->map;
  933. phys_addr_t dev_addr;
  934. size_t dev_size;
  935. enum dma_data_direction dev_dir;
  936. if (dir == DMA_DEV_TO_MEM) {
  937. dev_addr = rchan->src.slave_addr;
  938. dev_size = rchan->src.xfer_size;
  939. dev_dir = DMA_TO_DEVICE;
  940. } else {
  941. dev_addr = rchan->dst.slave_addr;
  942. dev_size = rchan->dst.xfer_size;
  943. dev_dir = DMA_FROM_DEVICE;
  944. }
  945. /* Reuse current map if possible. */
  946. if (dev_addr == map->slave.slave_addr &&
  947. dev_size == map->slave.xfer_size &&
  948. dev_dir == map->dir)
  949. return 0;
  950. /* Remove old mapping if present. */
  951. if (map->slave.xfer_size)
  952. dma_unmap_resource(chan->device->dev, map->addr,
  953. map->slave.xfer_size, map->dir, 0);
  954. map->slave.xfer_size = 0;
  955. /* Create new slave address map. */
  956. map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
  957. dev_dir, 0);
  958. if (dma_mapping_error(chan->device->dev, map->addr)) {
  959. dev_err(chan->device->dev,
  960. "chan%u: failed to map %zx@%pap", rchan->index,
  961. dev_size, &dev_addr);
  962. return -EIO;
  963. }
  964. dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
  965. rchan->index, dev_size, &dev_addr, &map->addr,
  966. dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
  967. map->slave.slave_addr = dev_addr;
  968. map->slave.xfer_size = dev_size;
  969. map->dir = dev_dir;
  970. return 0;
  971. }
  972. static struct dma_async_tx_descriptor *
  973. rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
  974. unsigned int sg_len, enum dma_transfer_direction dir,
  975. unsigned long flags, void *context)
  976. {
  977. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  978. /* Someone calling slave DMA on a generic channel? */
  979. if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
  980. dev_warn(chan->device->dev,
  981. "%s: bad parameter: len=%d, id=%d\n",
  982. __func__, sg_len, rchan->mid_rid);
  983. return NULL;
  984. }
  985. if (rcar_dmac_map_slave_addr(chan, dir))
  986. return NULL;
  987. return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
  988. dir, flags, false);
  989. }
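/*
 * Illustrative client-side sketch (not part of this driver): a typical slave
 * DMA setup as a peripheral driver might perform it before the prep call
 * above is reached. The function name, FIFO address and scatterlist are
 * hypothetical.
 */
#if 0
static int example_tx_to_fifo(struct dma_chan *chan, struct scatterlist *sgl,
			      unsigned int sg_len, phys_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.dst_addr = fifo_phys,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *tx;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EIO;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}
#endif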
  990. #define RCAR_DMAC_MAX_SG_LEN 32
  991. static struct dma_async_tx_descriptor *
  992. rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
  993. size_t buf_len, size_t period_len,
  994. enum dma_transfer_direction dir, unsigned long flags)
  995. {
  996. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  997. struct dma_async_tx_descriptor *desc;
  998. struct scatterlist *sgl;
  999. unsigned int sg_len;
  1000. unsigned int i;
  1001. /* Someone calling slave DMA on a generic channel? */
  1002. if (rchan->mid_rid < 0 || buf_len < period_len) {
  1003. dev_warn(chan->device->dev,
  1004. "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
  1005. __func__, buf_len, period_len, rchan->mid_rid);
  1006. return NULL;
  1007. }
  1008. if (rcar_dmac_map_slave_addr(chan, dir))
  1009. return NULL;
  1010. sg_len = buf_len / period_len;
  1011. if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
  1012. dev_err(chan->device->dev,
   1013. "chan%u: sg length %d exceeds limit %d",
  1014. rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
  1015. return NULL;
  1016. }
  1017. /*
  1018. * Allocate the sg list dynamically as it would consume too much stack
  1019. * space.
  1020. */
  1021. sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
  1022. if (!sgl)
  1023. return NULL;
  1024. sg_init_table(sgl, sg_len);
  1025. for (i = 0; i < sg_len; ++i) {
  1026. dma_addr_t src = buf_addr + (period_len * i);
  1027. sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
  1028. offset_in_page(src));
  1029. sg_dma_address(&sgl[i]) = src;
  1030. sg_dma_len(&sgl[i]) = period_len;
  1031. }
  1032. desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
  1033. dir, flags, true);
  1034. kfree(sgl);
  1035. return desc;
  1036. }
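/*
 * Illustrative client-side sketch (not part of this driver): a cyclic capture
 * into a ring buffer, e.g. for audio, where the callback is expected to run
 * roughly once per completed period (see the DSIE handling elsewhere in this
 * file). Buffer and callback names are hypothetical.
 */
#if 0
static void example_period_done(void *param)
{
	/* consume the period that just completed */
}

static int example_start_ring(struct dma_chan *chan, dma_addr_t buf,
			      size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;

	tx->callback = example_period_done;
	tx->callback_param = NULL;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}
#endif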
  1037. static int rcar_dmac_device_config(struct dma_chan *chan,
  1038. struct dma_slave_config *cfg)
  1039. {
  1040. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  1041. /*
  1042. * We could lock this, but you shouldn't be configuring the
   1043. * channel while it is in use...
  1044. */
  1045. rchan->src.slave_addr = cfg->src_addr;
  1046. rchan->dst.slave_addr = cfg->dst_addr;
  1047. rchan->src.xfer_size = cfg->src_addr_width;
  1048. rchan->dst.xfer_size = cfg->dst_addr_width;
  1049. return 0;
  1050. }
  1051. static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
  1052. {
  1053. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  1054. unsigned long flags;
  1055. spin_lock_irqsave(&rchan->lock, flags);
  1056. rcar_dmac_chan_halt(rchan);
  1057. spin_unlock_irqrestore(&rchan->lock, flags);
  1058. /*
  1059. * FIXME: No new interrupt can occur now, but the IRQ thread might still
  1060. * be running.
  1061. */
  1062. rcar_dmac_chan_reinit(rchan);
  1063. return 0;
  1064. }
  1065. static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
  1066. dma_cookie_t cookie)
  1067. {
  1068. struct rcar_dmac_desc *desc = chan->desc.running;
  1069. struct rcar_dmac_xfer_chunk *running = NULL;
  1070. struct rcar_dmac_xfer_chunk *chunk;
  1071. enum dma_status status;
  1072. unsigned int residue = 0;
  1073. unsigned int dptr = 0;
  1074. unsigned int chcrb;
  1075. unsigned int tcrb;
  1076. unsigned int i;
  1077. if (!desc)
  1078. return 0;
  1079. /*
  1080. * If the cookie corresponds to a descriptor that has been completed
  1081. * there is no residue. The same check has already been performed by the
  1082. * caller but without holding the channel lock, so the descriptor could
  1083. * now be complete.
  1084. */
  1085. status = dma_cookie_status(&chan->chan, cookie, NULL);
  1086. if (status == DMA_COMPLETE)
  1087. return 0;
  1088. /*
  1089. * If the cookie doesn't correspond to the currently running transfer
  1090. * then the descriptor hasn't been processed yet, and the residue is
  1091. * equal to the full descriptor size.
   1092. * Also, a client driver may call this function before
   1093. * rcar_dmac_isr_channel_thread() runs. In that case, "desc.running"
   1094. * already points to the next descriptor and the completed descriptor
   1095. * sits on the done list. So, if the cookie matches a descriptor on the
   1096. * done list, we can assume the residue is zero.
  1097. */
  1098. if (cookie != desc->async_tx.cookie) {
  1099. list_for_each_entry(desc, &chan->desc.done, node) {
  1100. if (cookie == desc->async_tx.cookie)
  1101. return 0;
  1102. }
  1103. list_for_each_entry(desc, &chan->desc.pending, node) {
  1104. if (cookie == desc->async_tx.cookie)
  1105. return desc->size;
  1106. }
  1107. list_for_each_entry(desc, &chan->desc.active, node) {
  1108. if (cookie == desc->async_tx.cookie)
  1109. return desc->size;
  1110. }
  1111. /*
  1112. * No descriptor found for the cookie, there's thus no residue.
  1113. * This shouldn't happen if the calling driver passes a correct
  1114. * cookie value.
  1115. */
  1116. WARN(1, "No descriptor for cookie!");
  1117. return 0;
  1118. }
  1119. /*
  1120. * We need to read two registers.
   1121. * Make sure the control register does not skip to the next chunk
  1122. * while reading the counter.
  1123. * Trying it 3 times should be enough: Initial read, retry, retry
  1124. * for the paranoid.
  1125. */
  1126. for (i = 0; i < 3; i++) {
  1127. chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
  1128. RCAR_DMACHCRB_DPTR_MASK;
  1129. tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
  1130. /* Still the same? */
  1131. if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
  1132. RCAR_DMACHCRB_DPTR_MASK))
  1133. break;
  1134. }
   1135. WARN_ONCE(i >= 3, "residue might not be continuous!");
  1136. /*
  1137. * In descriptor mode the descriptor running pointer is not maintained
  1138. * by the interrupt handler, find the running descriptor from the
  1139. * descriptor pointer field in the CHCRB register. In non-descriptor
  1140. * mode just use the running descriptor pointer.
  1141. */
  1142. if (desc->hwdescs.use) {
  1143. dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
  1144. if (dptr == 0)
  1145. dptr = desc->nchunks;
  1146. dptr--;
  1147. WARN_ON(dptr >= desc->nchunks);
  1148. } else {
  1149. running = desc->running;
  1150. }
  1151. /* Compute the size of all chunks still to be transferred. */
  1152. list_for_each_entry_reverse(chunk, &desc->chunks, node) {
  1153. if (chunk == running || ++dptr == desc->nchunks)
  1154. break;
  1155. residue += chunk->size;
  1156. }
  1157. /* Add the residue for the current chunk. */
  1158. residue += tcrb << desc->xfer_shift;
  1159. return residue;
  1160. }
  1161. static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
  1162. dma_cookie_t cookie,
  1163. struct dma_tx_state *txstate)
  1164. {
  1165. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  1166. enum dma_status status;
  1167. unsigned long flags;
  1168. unsigned int residue;
  1169. bool cyclic;
  1170. status = dma_cookie_status(chan, cookie, txstate);
  1171. if (status == DMA_COMPLETE || !txstate)
  1172. return status;
  1173. spin_lock_irqsave(&rchan->lock, flags);
  1174. residue = rcar_dmac_chan_get_residue(rchan, cookie);
  1175. cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
  1176. spin_unlock_irqrestore(&rchan->lock, flags);
  1177. /* if there's no residue, the cookie is complete */
  1178. if (!residue && !cyclic)
  1179. return DMA_COMPLETE;
  1180. dma_set_residue(txstate, residue);
  1181. return status;
  1182. }
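/*
 * Illustrative client-side sketch (not part of this driver): polling a
 * transfer with dmaengine_tx_status() to read back the residue computed
 * above. The cookie comes from an earlier dmaengine_submit().
 */
#if 0
static bool example_poll(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	pr_debug("status=%d, %u bytes still to transfer\n", status,
		 state.residue);
	return status == DMA_COMPLETE;
}
#endif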
  1183. static void rcar_dmac_issue_pending(struct dma_chan *chan)
  1184. {
  1185. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  1186. unsigned long flags;
  1187. spin_lock_irqsave(&rchan->lock, flags);
  1188. if (list_empty(&rchan->desc.pending))
  1189. goto done;
  1190. /* Append the pending list to the active list. */
  1191. list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
  1192. /*
  1193. * If no transfer is running pick the first descriptor from the active
  1194. * list and start the transfer.
  1195. */
  1196. if (!rchan->desc.running) {
  1197. struct rcar_dmac_desc *desc;
  1198. desc = list_first_entry(&rchan->desc.active,
  1199. struct rcar_dmac_desc, node);
  1200. rchan->desc.running = desc;
  1201. rcar_dmac_chan_start_xfer(rchan);
  1202. }
  1203. done:
  1204. spin_unlock_irqrestore(&rchan->lock, flags);
  1205. }
  1206. static void rcar_dmac_device_synchronize(struct dma_chan *chan)
  1207. {
  1208. struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
  1209. synchronize_irq(rchan->irq);
  1210. }
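/*
 * Illustrative client-side sketch (not part of this driver): tearing down an
 * in-flight transfer. dmaengine_terminate_async() is expected to reach the
 * terminate_all callback and dmaengine_synchronize() the device_synchronize
 * callback implemented above.
 */
#if 0
static void example_teardown(struct dma_chan *chan)
{
	dmaengine_terminate_async(chan);
	/* wait until no completion callback can still be running */
	dmaengine_synchronize(chan);
}
#endif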
  1211. /* -----------------------------------------------------------------------------
  1212. * IRQ handling
  1213. */
  1214. static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
  1215. {
  1216. struct rcar_dmac_desc *desc = chan->desc.running;
  1217. unsigned int stage;
  1218. if (WARN_ON(!desc || !desc->cyclic)) {
  1219. /*
  1220. * This should never happen, there should always be a running
  1221. * cyclic descriptor when a descriptor stage end interrupt is
  1222. * triggered. Warn and return.
  1223. */
  1224. return IRQ_NONE;
  1225. }
  1226. /* Program the interrupt pointer to the next stage. */
  1227. stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
  1228. RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
  1229. rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
  1230. return IRQ_WAKE_THREAD;
  1231. }
  1232. static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
  1233. {
  1234. struct rcar_dmac_desc *desc = chan->desc.running;
  1235. irqreturn_t ret = IRQ_WAKE_THREAD;
  1236. if (WARN_ON_ONCE(!desc)) {
  1237. /*
  1238. * This should never happen, there should always be a running
  1239. * descriptor when a transfer end interrupt is triggered. Warn
  1240. * and return.
  1241. */
  1242. return IRQ_NONE;
  1243. }
  1244. /*
  1245. * The transfer end interrupt isn't generated for each chunk when using
  1246. * descriptor mode. Only update the running chunk pointer in
  1247. * non-descriptor mode.
  1248. */
  1249. if (!desc->hwdescs.use) {
  1250. /*
  1251. * If we haven't completed the last transfer chunk simply move
  1252. * to the next one. Only wake the IRQ thread if the transfer is
  1253. * cyclic.
  1254. */
  1255. if (!list_is_last(&desc->running->node, &desc->chunks)) {
  1256. desc->running = list_next_entry(desc->running, node);
  1257. if (!desc->cyclic)
  1258. ret = IRQ_HANDLED;
  1259. goto done;
  1260. }
  1261. /*
  1262. * We've completed the last transfer chunk. If the transfer is
  1263. * cyclic, move back to the first one.
  1264. */
  1265. if (desc->cyclic) {
  1266. desc->running =
  1267. list_first_entry(&desc->chunks,
  1268. struct rcar_dmac_xfer_chunk,
  1269. node);
  1270. goto done;
  1271. }
  1272. }
  1273. /* The descriptor is complete, move it to the done list. */
  1274. list_move_tail(&desc->node, &chan->desc.done);
  1275. /* Queue the next descriptor, if any. */
  1276. if (!list_empty(&chan->desc.active))
  1277. chan->desc.running = list_first_entry(&chan->desc.active,
  1278. struct rcar_dmac_desc,
  1279. node);
  1280. else
  1281. chan->desc.running = NULL;
  1282. done:
  1283. if (chan->desc.running)
  1284. rcar_dmac_chan_start_xfer(chan);
  1285. return ret;
  1286. }
  1287. static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
  1288. {
  1289. u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
  1290. struct rcar_dmac_chan *chan = dev;
  1291. irqreturn_t ret = IRQ_NONE;
  1292. bool reinit = false;
  1293. u32 chcr;
  1294. spin_lock(&chan->lock);
  1295. chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
  1296. if (chcr & RCAR_DMACHCR_CAE) {
  1297. struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device);
  1298. /*
  1299. * We don't need to call rcar_dmac_chan_halt()
   1300. * because the channel is already stopped in the error case.
   1301. * We need to clear the register and check the DE bit as recovery.
  1302. */
  1303. rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index);
  1304. rcar_dmac_chcr_de_barrier(chan);
  1305. reinit = true;
  1306. goto spin_lock_end;
  1307. }
  1308. if (chcr & RCAR_DMACHCR_TE)
  1309. mask |= RCAR_DMACHCR_DE;
  1310. rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
  1311. if (mask & RCAR_DMACHCR_DE)
  1312. rcar_dmac_chcr_de_barrier(chan);
  1313. if (chcr & RCAR_DMACHCR_DSE)
  1314. ret |= rcar_dmac_isr_desc_stage_end(chan);
  1315. if (chcr & RCAR_DMACHCR_TE)
  1316. ret |= rcar_dmac_isr_transfer_end(chan);
  1317. spin_lock_end:
  1318. spin_unlock(&chan->lock);
  1319. if (reinit) {
  1320. dev_err(chan->chan.device->dev, "Channel Address Error\n");
  1321. rcar_dmac_chan_reinit(chan);
  1322. ret = IRQ_HANDLED;
  1323. }
  1324. return ret;
  1325. }
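/*
 * Threaded half of the channel interrupt handler, invoked when the top half
 * returns IRQ_WAKE_THREAD. It runs the dmaengine client callbacks outside of
 * hard IRQ context, dropping the channel lock around each invocation.
 */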
static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
	struct rcar_dmac_chan *chan = dev;
	struct rcar_dmac_desc *desc;
	struct dmaengine_desc_callback cb;

	spin_lock_irq(&chan->lock);

	/* For cyclic transfers notify the user after every chunk. */
	if (chan->desc.running && chan->desc.running->cyclic) {
		desc = chan->desc.running;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);

		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}
	}

	/*
	 * Call the callback function for all descriptors on the done list and
	 * move them to the ack wait list.
	 */
	while (!list_empty(&chan->desc.done)) {
		desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
					node);
		dma_cookie_complete(&desc->async_tx);
		list_del(&desc->node);

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			/*
			 * We own the only reference to this descriptor, we can
			 * safely dereference it without holding the channel
			 * lock.
			 */
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}

		list_add_tail(&desc->node, &chan->desc.wait);
	}

	spin_unlock_irq(&chan->lock);

	/* Recycle all acked descriptors. */
	rcar_dmac_desc_recycle_acked(chan);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */
static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;

	/*
	 * FIXME: Using a filter on OF platforms makes no sense. The OF xlate
	 * function knows which device it wants to allocate a channel from, and
	 * would be perfectly capable of selecting the channel it wants.
	 * Forcing it to call dma_request_channel() and iterate through all
	 * channels from all controllers is just pointless.
	 */
	if (chan->device->device_config != rcar_dmac_device_config ||
	    dma_spec->np != chan->device->dev->of_node)
		return false;

	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
}

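/*
 * Translate a DT DMA specifier into a channel. The single specifier cell
 * carries the MID/RID value of the slave; the channel is reserved through
 * rcar_dmac_chan_filter() and the MID/RID is stored for later configuration.
 */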
static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct rcar_dmac_chan *rchan;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
	if (!chan)
		return NULL;

	rchan = to_rcar_dmac_chan(chan);
	rchan->mid_rid = dma_spec->args[0];

	return chan;
}

/* -----------------------------------------------------------------------------
 * Power management
 */
#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
	return 0;
}

static int rcar_dmac_runtime_resume(struct device *dev)
{
	struct rcar_dmac *dmac = dev_get_drvdata(dev);

	return rcar_dmac_init(dmac);
}
#endif

static const struct dev_pm_ops rcar_dmac_pm = {
	/*
	 * TODO for system sleep/resume:
	 * - Wait for the current transfer to complete and stop the device,
	 * - Resume transfers, if any.
	 */
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
			   NULL)
};

/* -----------------------------------------------------------------------------
 * Probe and remove
 */
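/*
 * Initialize a single channel: set up its register window, lock and
 * descriptor lists, add it to the DMA engine channel list, and request its
 * threaded interrupt.
 */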
static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
				struct rcar_dmac_chan *rchan,
				unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct dma_chan *chan = &rchan->chan;
	char pdev_irqname[5];
	char *irqname;
	int ret;

	rchan->index = index;
	rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
	rchan->mid_rid = -EINVAL;

	spin_lock_init(&rchan->lock);

	INIT_LIST_HEAD(&rchan->desc.free);
	INIT_LIST_HEAD(&rchan->desc.pending);
	INIT_LIST_HEAD(&rchan->desc.active);
	INIT_LIST_HEAD(&rchan->desc.done);
	INIT_LIST_HEAD(&rchan->desc.wait);

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (rchan->irq < 0) {
		dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	/*
	 * Initialize the DMA engine channel and add it to the DMA engine
	 * channels list.
	 */
	chan->device = &dmac->engine;
	dma_cookie_init(chan);

	list_add_tail(&chan->device_node, &dmac->engine.channels);

	ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
					rcar_dmac_isr_channel,
					rcar_dmac_isr_channel_thread, 0,
					irqname, rchan);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			rchan->irq, ret);
		return ret;
	}

	return 0;
}

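/* Read and validate the "dma-channels" property from the device tree node. */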
static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
		dev_err(dev, "invalid number of channels %u\n",
			dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}

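/*
 * Probe the controller: allocate the driver data, parse the DT properties,
 * map the registers, enable runtime PM and reset the hardware, then populate
 * the DMA engine device and register it with the dmaengine core and as an OF
 * DMA provider.
 */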
static int rcar_dmac_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
	unsigned int channels_offset = 0;
	struct dma_device *engine;
	struct rcar_dmac *dmac;
	struct resource *mem;
	unsigned int i;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);
	dmac->dev->dma_parms = &dmac->parms;
	dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
	dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));

	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	/*
	 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be
	 * flushed correctly, resulting in memory corruption. DMAC 0 channel 0
	 * is connected to microTLB 0 on currently supported platforms, so we
	 * can't use it with the IPMMU. As the IOMMU API operates at the device
	 * level we can't disable it selectively, so ignore channel 0 for now
	 * if the device is part of an IOMMU group.
	 */
	if (pdev->dev.iommu_group) {
		dmac->n_channels--;
		channels_offset = 1;
	}

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources. */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(dmac->iomem))
		return PTR_ERR(dmac->iomem);

	/* Enable runtime PM and initialize the device. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
		return ret;
	}

	ret = rcar_dmac_init(dmac);
	pm_runtime_put(&pdev->dev);

	if (ret) {
		dev_err(&pdev->dev, "failed to reset device\n");
		goto error;
	}

	/* Initialize engine */
	engine = &dmac->engine;

	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	dma_cap_set(DMA_SLAVE, engine->cap_mask);

	engine->dev = &pdev->dev;
	engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);

	engine->src_addr_widths = widths;
	engine->dst_addr_widths = widths;
	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
	engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
	engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
	engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
	engine->device_config = rcar_dmac_device_config;
	engine->device_pause = rcar_dmac_chan_pause;
	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
	engine->device_tx_status = rcar_dmac_tx_status;
	engine->device_issue_pending = rcar_dmac_issue_pending;
	engine->device_synchronize = rcar_dmac_device_synchronize;

	INIT_LIST_HEAD(&engine->channels);

	for (i = 0; i < dmac->n_channels; ++i) {
		ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
					   i + channels_offset);
		if (ret < 0)
			goto error;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto error;

	/*
	 * Register the DMA engine device.
	 *
	 * Default transfer size of 32 bytes requires 32-byte alignment.
	 */
	ret = dma_async_device_register(engine);
	if (ret < 0)
		goto error;

	return 0;

error:
	of_dma_controller_free(pdev->dev.of_node);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

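/*
 * Unbind the driver: unregister the OF DMA provider and the DMA engine
 * device, then disable runtime PM.
 */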
static int rcar_dmac_remove(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

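/* Quiesce the hardware on system shutdown by stopping all channels. */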
static void rcar_dmac_shutdown(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	rcar_dmac_stop_all_chan(dmac);
}

static const struct of_device_id rcar_dmac_of_ids[] = {
	{ .compatible = "renesas,rcar-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);

static struct platform_driver rcar_dmac_driver = {
	.driver = {
		.pm = &rcar_dmac_pm,
		.name = "rcar-dmac",
		.of_match_table = rcar_dmac_of_ids,
	},
	.probe = rcar_dmac_probe,
	.remove = rcar_dmac_remove,
	.shutdown = rcar_dmac_shutdown,
};

module_platform_driver(rcar_dmac_driver);

MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");