mv_xor_v2.c

/*
 * Copyright (C) 2015-2016 Marvell International Ltd.
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include "dmaengine.h"

/* DMA Engine Registers */
#define MV_XOR_V2_DMA_DESQ_BALR_OFF 0x000
#define MV_XOR_V2_DMA_DESQ_BAHR_OFF 0x004
#define MV_XOR_V2_DMA_DESQ_SIZE_OFF 0x008
#define MV_XOR_V2_DMA_DESQ_DONE_OFF 0x00C
#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK 0x7FFF
#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT 0
#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK 0x1FFF
#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT 16
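/*
 * Layout of the DESQ_DONE register implied by the masks and shifts
 * above: bits [14:0] hold the count of completed ("done pending")
 * descriptors, and bits [28:16] hold the hardware read pointer into
 * the descriptor queue.
 */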
#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF 0x010
#define MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK 0x3F3F
#define MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE 0x202
#define MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE 0x3C3C

#define MV_XOR_V2_DMA_IMSG_CDAT_OFF 0x014
#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018
#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF
#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0
#define MV_XOR_V2_DMA_IMSG_TIMER_EN BIT(18)

#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C
/* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */

#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF 0x04C
#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK 0xFFFF
#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT 16

#define MV_XOR_V2_DMA_IMSG_BALR_OFF 0x050
#define MV_XOR_V2_DMA_IMSG_BAHR_OFF 0x054
#define MV_XOR_V2_DMA_DESQ_CTRL_OFF 0x100
#define MV_XOR_V2_DMA_DESQ_CTRL_32B 1
#define MV_XOR_V2_DMA_DESQ_CTRL_128B 7
#define MV_XOR_V2_DMA_DESQ_STOP_OFF 0x800
#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF 0x804
#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808
#define MV_XOR_V2_DMA_IMSG_TMOT 0x810
#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK 0x1FFF
#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT 0

/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL 0x4
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT 0
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL 64
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT 8
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL 8
#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT 12
#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL 4
#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT 16
#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL 4
#define MV_XOR_V2_GLOB_PAUSE 0x014
#define MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL 0x8
#define MV_XOR_V2_GLOB_SYS_INT_CAUSE 0x200
#define MV_XOR_V2_GLOB_SYS_INT_MASK 0x204
#define MV_XOR_V2_GLOB_MEM_INT_CAUSE 0x220
#define MV_XOR_V2_GLOB_MEM_INT_MASK 0x224

#define MV_XOR_V2_MIN_DESC_SIZE 32
#define MV_XOR_V2_EXT_DESC_SIZE 128

#define MV_XOR_V2_DESC_RESERVED_SIZE 12
#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE 12

#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF 8

/*
 * Descriptors queue size. With 32-byte descriptors, up to 2^14
 * descriptors are allowed; with 128-byte descriptors, up to 2^12
 * descriptors are allowed. This driver uses 128-byte descriptors,
 * but experimentation has shown that a set of 1024 descriptors is
 * sufficient to reach a good level of performance.
 */
#define MV_XOR_V2_DESC_NUM 1024
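/*
 * For reference: with 128-byte descriptors, a ring of 1024 entries
 * occupies 1024 * 128 = 128 KiB of coherent memory (allocated in
 * mv_xor_v2_probe() with dma_alloc_coherent()).
 */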
/*
 * Threshold values for descriptors and timeout, determined by
 * experimentation as giving a good level of performance.
 */
#define MV_XOR_V2_DONE_IMSG_THRD 0x14
#define MV_XOR_V2_TIMER_THRD 0xB0

/**
 * struct mv_xor_v2_descriptor - DMA HW descriptor
 * @desc_id: used by S/W and is not affected by H/W.
 * @flags: error and status flags
 * @crc32_result: CRC32 calculation result
 * @desc_ctrl: operation mode and control flags
 * @buff_size: number of bytes to be processed
 * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
 * AW-Attributes
 * @data_buff_addr: Source (and might be RAID6 destination)
 * addresses of data buffers in RAID5 and RAID6
 * @reserved: reserved
 */
struct mv_xor_v2_descriptor {
	u16 desc_id;
	u16 flags;
	u32 crc32_result;
	u32 desc_ctrl;

	/* Definitions for desc_ctrl */
#define DESC_NUM_ACTIVE_D_BUF_SHIFT 22
#define DESC_OP_MODE_SHIFT 28
#define DESC_OP_MODE_NOP 0		/* Idle operation */
#define DESC_OP_MODE_MEMCPY 1		/* Pure-DMA operation */
#define DESC_OP_MODE_MEMSET 2		/* Mem-Fill operation */
#define DESC_OP_MODE_MEMINIT 3		/* Mem-Init operation */
#define DESC_OP_MODE_MEM_COMPARE 4	/* Mem-Compare operation */
#define DESC_OP_MODE_CRC32 5		/* CRC32 calculation */
#define DESC_OP_MODE_XOR 6		/* RAID5 (XOR) operation */
#define DESC_OP_MODE_RAID6 7		/* RAID6 P&Q-generation */
#define DESC_OP_MODE_RAID6_REC 8	/* RAID6 Recovery */
#define DESC_Q_BUFFER_ENABLE BIT(16)
#define DESC_P_BUFFER_ENABLE BIT(17)
#define DESC_IOD BIT(27)

	u32 buff_size;
	u32 fill_pattern_src_addr[4];
	u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
	u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
};
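/*
 * Size check (for reference): 2 + 2 + 4 + 4 bytes of header fields,
 * 4 bytes of buff_size, 4 * 4 bytes of fill_pattern_src_addr,
 * 12 * 4 bytes of data_buff_addr and 12 * 4 bytes of reserved add up
 * to 128 bytes, i.e. MV_XOR_V2_EXT_DESC_SIZE; mv_xor_v2_probe()
 * enforces this with a BUILD_BUG_ON().
 */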
/**
 * struct mv_xor_v2_device - implements a xor device
 * @lock: lock for the engine
 * @dma_base: memory mapped DMA register base
 * @glob_base: memory mapped global register base
 * @clk: reference to the core clock of the XOR engine
 * @reg_clk: reference to the optional register clock
 * @irq_tasklet: tasklet that handles descriptor completion callbacks
 * @free_sw_desc: linked list of free SW descriptors
 * @dmadev: dma device
 * @dmachan: dma channel
 * @hw_desq: HW descriptors queue
 * @hw_desq_virt: virtual address of DESCQ
 * @sw_desq: SW descriptors queue
 * @desc_size: HW descriptor size
 * @npendings: number of pending descriptors (for which tx_submit has
 * been called, but not yet issue_pending)
 * @hw_queue_idx: index of the next free slot in the HW descriptor queue
 */
struct mv_xor_v2_device {
	spinlock_t lock;
	void __iomem *dma_base;
	void __iomem *glob_base;
	struct clk *clk;
	struct clk *reg_clk;
	struct tasklet_struct irq_tasklet;
	struct list_head free_sw_desc;
	struct dma_device dmadev;
	struct dma_chan dmachan;
	dma_addr_t hw_desq;
	struct mv_xor_v2_descriptor *hw_desq_virt;
	struct mv_xor_v2_sw_desc *sw_desq;
	int desc_size;
	unsigned int npendings;
	unsigned int hw_queue_idx;
};

/**
 * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
 * @idx: descriptor index
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @free_list: node of the free SW descriptors list
 */
struct mv_xor_v2_sw_desc {
	int idx;
	struct dma_async_tx_descriptor async_tx;
	struct mv_xor_v2_descriptor hw_desc;
	struct list_head free_list;
};
/*
 * Fill the data buffer addresses in a HW descriptor
 */
static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
				       struct mv_xor_v2_descriptor *desc,
				       dma_addr_t src, int index)
{
	int arr_index = ((index >> 1) * 3);

	/*
	 * Fill the buffer's addresses to the descriptor.
	 *
	 * The layout of the buffer addresses for 2 sequential buffers
	 * X and X + 1:
	 *
	 *  First word:  Buffer-DX-Address-Low[31:0]
	 *  Second word: Buffer-DX+1-Address-Low[31:0]
	 *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16]
	 *               DX-Buffer-Address-High[47:32] [15:0]
	 */
	if ((index & 0x1) == 0) {
		desc->data_buff_addr[arr_index] = lower_32_bits(src);

		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
		desc->data_buff_addr[arr_index + 2] |=
			upper_32_bits(src) & 0xFFFF;
	} else {
		desc->data_buff_addr[arr_index + 1] =
			lower_32_bits(src);

		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
		desc->data_buff_addr[arr_index + 2] |=
			(upper_32_bits(src) & 0xFFFF) << 16;
	}
}
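/*
 * Worked example (illustrative only): for two 40-bit sources
 * src0 = 0x12_3456_7890 at index 0 and src1 = 0xAB_CDEF_0123 at
 * index 1, arr_index is 0 for both and the packing above yields:
 *
 *   data_buff_addr[0] = 0x34567890  (src0, low 32 bits)
 *   data_buff_addr[1] = 0xCDEF0123  (src1, low 32 bits)
 *   data_buff_addr[2] = 0x00AB0012  (src1 high 16 bits << 16 |
 *                                    src0 high 16 bits)
 */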
/*
 * notify the engine of new descriptors, and update the available index.
 */
static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
				       int num_of_desc)
{
	/* write the number of new descriptors to the DESQ. */
	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
}

/*
 * free HW descriptors
 */
static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
					  int num_of_desc)
{
	/* write the number of descriptors to free from the DESQ. */
	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
}

/*
 * Set descriptor size
 * Return the HW descriptor size in bytes
 */
static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
{
	writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);

	return MV_XOR_V2_EXT_DESC_SIZE;
}

/*
 * Set the IMSG threshold and enable the IMSG timer, so that
 * completion interrupts are coalesced (see MV_XOR_V2_DONE_IMSG_THRD
 * and MV_XOR_V2_TIMER_THRD).
 */
static inline
void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
{
	u32 reg;

	/* Configure threshold of number of descriptors, and enable timer */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
	reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
	reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
	reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);

	/* Configure Timer Threshold */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
	reg &= (~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
		MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
	reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
}
static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
	struct mv_xor_v2_device *xor_dev = data;
	unsigned int ndescs;
	u32 reg;

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

	ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
		  MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);

	/* No descriptors to process */
	if (!ndescs)
		return IRQ_NONE;

	/* schedule a tasklet to handle descriptor callbacks */
	tasklet_schedule(&xor_dev->irq_tasklet);

	return IRQ_HANDLED;
}

/*
 * submit a descriptor to the DMA engine
 */
static dma_cookie_t
mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
{
	void *dest_hw_desc;
	dma_cookie_t cookie;
	struct mv_xor_v2_sw_desc *sw_desc =
		container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
	struct mv_xor_v2_device *xor_dev =
		container_of(tx->chan, struct mv_xor_v2_device, dmachan);

	dev_dbg(xor_dev->dmadev.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	/* assign cookie */
	spin_lock_bh(&xor_dev->lock);
	cookie = dma_cookie_assign(tx);

	/* copy the HW descriptor from the SW descriptor to the DESQ */
	dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;

	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);

	xor_dev->npendings++;
	xor_dev->hw_queue_idx++;
	if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
		xor_dev->hw_queue_idx = 0;

	spin_unlock_bh(&xor_dev->lock);

	return cookie;
}
/*
 * Prepare a SW descriptor
 */
static struct mv_xor_v2_sw_desc *
mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	bool found = false;

	/* Lock the channel */
	spin_lock_bh(&xor_dev->lock);

	if (list_empty(&xor_dev->free_sw_desc)) {
		spin_unlock_bh(&xor_dev->lock);
		/* schedule tasklet to free some descriptors */
		tasklet_schedule(&xor_dev->irq_tasklet);
		return NULL;
	}

	list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
		if (async_tx_test_ack(&sw_desc->async_tx)) {
			found = true;
			break;
		}
	}

	if (!found) {
		spin_unlock_bh(&xor_dev->lock);
		return NULL;
	}

	list_del(&sw_desc->free_list);

	/* Release the channel */
	spin_unlock_bh(&xor_dev->lock);

	return sw_desc;
}

/*
 * Prepare a HW descriptor for a memcpy operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev;

	xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);

	dev_dbg(xor_dev->dmadev.dev,
		"%s len: %zu src %pad dest %pad flags: %ld\n",
		__func__, len, &src, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	sw_desc->async_tx.flags = flags;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the MEMCPY control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;

	if (flags & DMA_PREP_INTERRUPT)
		hw_descriptor->desc_ctrl |= DESC_IOD;

	/* Set source address */
	hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
	hw_descriptor->fill_pattern_src_addr[1] =
		upper_32_bits(src) & 0xFFFF;

	/* Set Destination address */
	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
	hw_descriptor->fill_pattern_src_addr[3] =
		upper_32_bits(dest) & 0xFFFF;

	/* Set buffers size */
	hw_descriptor->buff_size = len;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}
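/*
 * For reference, a dmaengine client would typically drive this
 * channel roughly as follows (illustrative sketch, not part of this
 * driver; dst, src and len are placeholders):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);	// ends up in mv_xor_v2_tx_submit()
 *		dma_async_issue_pending(chan);	// ends up in mv_xor_v2_issue_pending()
 *	}
 */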
/*
 * Prepare a HW descriptor for a XOR operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		       unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);
	int i;

	if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
		return NULL;

	dev_dbg(xor_dev->dmadev.dev,
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	sw_desc->async_tx.flags = flags;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the XOR control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
	hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;

	if (flags & DMA_PREP_INTERRUPT)
		hw_descriptor->desc_ctrl |= DESC_IOD;

	/* Set the data buffers */
	for (i = 0; i < src_cnt; i++)
		mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);

	hw_descriptor->desc_ctrl |=
		src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;

	/* Set Destination address */
	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
	hw_descriptor->fill_pattern_src_addr[3] =
		upper_32_bits(dest) & 0xFFFF;

	/* Set buffers size */
	hw_descriptor->buff_size = len;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}

/*
 * Prepare a HW descriptor for interrupt operation.
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the INTERRUPT control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
	hw_descriptor->desc_ctrl |= DESC_IOD;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}
/*
 * push pending transactions to hardware
 */
static void mv_xor_v2_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);

	spin_lock_bh(&xor_dev->lock);

	/*
	 * update the engine with the number of descriptors to
	 * process
	 */
	mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
	xor_dev->npendings = 0;

	spin_unlock_bh(&xor_dev->lock);
}

static inline
int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
				 int *pending_ptr)
{
	u32 reg;

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

	/* get the next pending descriptor index */
	*pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
			MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);

	/* get the number of descriptors pending handling */
	return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
		MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
}

/*
 * handle the descriptors after HW process
 */
static void mv_xor_v2_tasklet(unsigned long data)
{
	struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
	int pending_ptr, num_of_pending, i;
	struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;

	dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);

	/* get the pending descriptors parameters */
	num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);

	/* loop over the completed descriptors */
	for (i = 0; i < num_of_pending; i++) {
		struct mv_xor_v2_descriptor *next_pending_hw_desc =
			xor_dev->hw_desq_virt + pending_ptr;

		/* get the SW descriptor related to the HW descriptor */
		next_pending_sw_desc =
			&xor_dev->sw_desq[next_pending_hw_desc->desc_id];

		/* call the callback */
		if (next_pending_sw_desc->async_tx.cookie > 0) {
			/*
			 * update the channel's completed cookie - no
			 * lock is required; the IMSG threshold provides
			 * the locking
			 */
			dma_cookie_complete(&next_pending_sw_desc->async_tx);

			if (next_pending_sw_desc->async_tx.callback)
				next_pending_sw_desc->async_tx.callback(
				next_pending_sw_desc->async_tx.callback_param);

			dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
		}

		dma_run_dependencies(&next_pending_sw_desc->async_tx);

		/* Lock the channel */
		spin_lock_bh(&xor_dev->lock);

		/* add the SW descriptor to the free descriptors list */
		list_add(&next_pending_sw_desc->free_list,
			 &xor_dev->free_sw_desc);

		/* Release the channel */
		spin_unlock_bh(&xor_dev->lock);

		/* increment the next descriptor */
		pending_ptr++;
		if (pending_ptr >= MV_XOR_V2_DESC_NUM)
			pending_ptr = 0;
	}

	if (num_of_pending != 0) {
		/* free the descriptors */
		mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
	}
}
/*
 * Set DMA Interrupt-message (IMSG) parameters
 */
static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);

	writel(msg->address_lo,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
	writel(msg->address_hi & 0xFFFF,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
	writel(msg->data,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
}

static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
{
	u32 reg;

	/* write the DESQ size to the DMA engine */
	writel(MV_XOR_V2_DESC_NUM,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);

	/* write the DESQ address to the DMA engine */
	writel(xor_dev->hw_desq & 0xFFFFFFFF,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
	writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);

	/*
	 * This is a temporary solution, until we activate the
	 * SMMU. Set the attributes for reading & writing data buffers
	 * & descriptors to:
	 *
	 *  - OuterShareable - Snoops will be performed on CPU caches
	 *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
	 *    and Allocate
	 */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
	       MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
	       MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);

	/*
	 * BW CTRL - set values to optimize the XOR performance:
	 *
	 *  - Set WrBurstLen & RdBurstLen - the unit will issue a
	 *    maximum of 256B write/read transactions.
	 *  - Limit the number of outstanding write & read data
	 *    (OBB/IBB) requests to the maximal value.
	 */
	reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);

	/* Disable the AXI timer feature */
	reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
	reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);

	/* enable the DMA engine */
	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	return 0;
}
static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

	/* Set the DESQ stop bit to disable the XOR unit */
	writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	return 0;
}

static int mv_xor_v2_resume(struct platform_device *dev)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

	mv_xor_v2_set_desc_size(xor_dev);
	mv_xor_v2_enable_imsg_thrd(xor_dev);
	mv_xor_v2_descq_init(xor_dev);

	return 0;
}
static int mv_xor_v2_probe(struct platform_device *pdev)
{
	struct mv_xor_v2_device *xor_dev;
	struct resource *res;
	int i, ret = 0;
	struct dma_device *dma_dev;
	struct mv_xor_v2_sw_desc *sw_desc;
	struct msi_desc *msi_desc;

	BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
		     MV_XOR_V2_EXT_DESC_SIZE);

	xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
	if (!xor_dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xor_dev->dma_base))
		return PTR_ERR(xor_dev->dma_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xor_dev->glob_base))
		return PTR_ERR(xor_dev->glob_base);

	platform_set_drvdata(pdev, xor_dev);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
	if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
		if (!IS_ERR(xor_dev->reg_clk)) {
			ret = clk_prepare_enable(xor_dev->reg_clk);
			if (ret)
				return ret;
		} else {
			return PTR_ERR(xor_dev->reg_clk);
		}
	}

	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto disable_reg_clk;
	}
	if (!IS_ERR(xor_dev->clk)) {
		ret = clk_prepare_enable(xor_dev->clk);
		if (ret)
			goto disable_reg_clk;
	}

	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
					     mv_xor_v2_set_msi_msg);
	if (ret)
		goto disable_clk;

	msi_desc = first_msi_entry(&pdev->dev);
	if (!msi_desc) {
		ret = -ENODEV;
		goto free_msi_irqs;
	}

	ret = devm_request_irq(&pdev->dev, msi_desc->irq,
			       mv_xor_v2_interrupt_handler, 0,
			       dev_name(&pdev->dev), xor_dev);
	if (ret)
		goto free_msi_irqs;

	tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
		     (unsigned long) xor_dev);

	xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);

	dma_cookie_init(&xor_dev->dmachan);

	/*
	 * allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	xor_dev->hw_desq_virt =
		dma_alloc_coherent(&pdev->dev,
				   xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
				   &xor_dev->hw_desq, GFP_KERNEL);
	if (!xor_dev->hw_desq_virt) {
		ret = -ENOMEM;
		goto free_msi_irqs;
	}

	/* alloc memory for the SW descriptors */
	xor_dev->sw_desq = devm_kzalloc(&pdev->dev, sizeof(*sw_desc) *
					MV_XOR_V2_DESC_NUM, GFP_KERNEL);
	if (!xor_dev->sw_desq) {
		ret = -ENOMEM;
		goto free_hw_desq;
	}

	spin_lock_init(&xor_dev->lock);

	/* init the free SW descriptors list */
	INIT_LIST_HEAD(&xor_dev->free_sw_desc);

	/* add all SW descriptors to the free list */
	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
		struct mv_xor_v2_sw_desc *sw_desc =
			xor_dev->sw_desq + i;
		sw_desc->idx = i;
		dma_async_tx_descriptor_init(&sw_desc->async_tx,
					     &xor_dev->dmachan);
		sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
		async_tx_ack(&sw_desc->async_tx);

		list_add(&sw_desc->free_list,
			 &xor_dev->free_sw_desc);
	}

	dma_dev = &xor_dev->dmadev;

	/* set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* init dma link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
	dma_dev->max_xor = 8;
	dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;

	xor_dev->dmachan.device = dma_dev;

	list_add_tail(&xor_dev->dmachan.device_node,
		      &dma_dev->channels);

	mv_xor_v2_enable_imsg_thrd(xor_dev);

	mv_xor_v2_descq_init(xor_dev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto free_hw_desq;

	dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");

	return 0;

free_hw_desq:
	dma_free_coherent(&pdev->dev,
			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
			  xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
	platform_msi_domain_free_irqs(&pdev->dev);
disable_clk:
	clk_disable_unprepare(xor_dev->clk);
disable_reg_clk:
	clk_disable_unprepare(xor_dev->reg_clk);
	return ret;
}
static int mv_xor_v2_remove(struct platform_device *pdev)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&xor_dev->dmadev);

	dma_free_coherent(&pdev->dev,
			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
			  xor_dev->hw_desq_virt, xor_dev->hw_desq);

	platform_msi_domain_free_irqs(&pdev->dev);

	tasklet_kill(&xor_dev->irq_tasklet);

	clk_disable_unprepare(xor_dev->clk);
	clk_disable_unprepare(xor_dev->reg_clk);

	return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id mv_xor_v2_dt_ids[] = {
	{ .compatible = "marvell,xor-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
#endif

static struct platform_driver mv_xor_v2_driver = {
	.probe = mv_xor_v2_probe,
	.suspend = mv_xor_v2_suspend,
	.resume = mv_xor_v2_resume,
	.remove = mv_xor_v2_remove,
	.driver = {
		.name = "mv_xor_v2",
		.of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
	},
};
module_platform_driver(mv_xor_v2_driver);

MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
MODULE_LICENSE("GPL");