mv_xor.c

/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"
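
/*
 * mv_xor_type identifies the SoC variant; mv_xor_mode tells whether the
 * XOR/MEMCPY operation is selected through the channel configuration
 * register (older Orion engines) or encoded in each hardware descriptor
 * (newer engines).
 */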
enum mv_xor_type {
        XOR_ORION,
        XOR_ARMADA_38X,
        XOR_ARMADA_37XX,
};

enum mv_xor_mode {
        XOR_MODE_IN_REG,
        XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)            \
        container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)              \
        container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)           \
        ((chan)->dmadev.dev)
static void mv_desc_init(struct mv_xor_desc_slot *desc,
                         dma_addr_t addr, u32 byte_count,
                         enum dma_ctrl_flags flags)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;

        hw_desc->status = XOR_DESC_DMA_OWNED;
        hw_desc->phy_next_desc = 0;
        /* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
        hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
                                XOR_DESC_EOD_INT_EN : 0;
        hw_desc->phy_dest_addr = addr;
        hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;

        switch (desc->type) {
        case DMA_XOR:
        case DMA_INTERRUPT:
                hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
                break;
        case DMA_MEMCPY:
                hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
                break;
        default:
                BUG();
                return;
        }
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
                                  u32 next_desc_addr)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;

        BUG_ON(hw_desc->phy_next_desc);
        hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
                                 int index, dma_addr_t addr)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;

        hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
        if (desc->type == DMA_XOR)
                hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
        return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
                                        u32 next_desc_addr)
{
        writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}
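
/*
 * The interrupt mask and cause registers are shared by both channels of
 * an XOR engine; each channel owns a 16-bit field selected by chan->idx.
 */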
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
        u32 val = readl_relaxed(XOR_INTR_MASK(chan));

        val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
        writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
        u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));

        intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;

        return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
        u32 val;

        val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
        val = ~(val << (chan->idx * 16));
        dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
        writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
        u32 val = 0xFFFF0000 >> (chan->idx * 16);

        writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
                             u32 op_mode)
{
        u32 config = readl_relaxed(XOR_CONFIG(chan));

        config &= ~0x7;
        config |= op_mode;

#if defined(__BIG_ENDIAN)
        config |= XOR_DESCRIPTOR_SWAP;
#else
        config &= ~XOR_DESCRIPTOR_SWAP;
#endif

        writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
        dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

        /* writel ensures all descriptors are flushed before activation */
        writel(BIT(0), XOR_ACTIVATION(chan));
}
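
/*
 * The channel status lives in bits [5:4] of the activation register;
 * a value of 1 there means the channel is currently active (busy).
 */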
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
        u32 state = readl_relaxed(XOR_ACTIVATION(chan));

        state = (state >> 4) & 0x3;

        return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
                                    struct mv_xor_desc_slot *sw_desc)
{
        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
                __func__, __LINE__, sw_desc);

        /* set the hardware chain */
        mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

        mv_chan->pending++;
        mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
                                struct mv_xor_chan *mv_chan,
                                dma_cookie_t cookie)
{
        BUG_ON(desc->async_tx.cookie < 0);

        if (desc->async_tx.cookie > 0) {
                cookie = desc->async_tx.cookie;

                dma_descriptor_unmap(&desc->async_tx);
                /* call the callback (must not sleep or submit new
                 * operations to this channel)
                 */
                dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
        }

        /* run dependent operations */
        dma_run_dependencies(&desc->async_tx);

        return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
        struct mv_xor_desc_slot *iter, *_iter;

        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
        list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
                                 node) {

                if (async_tx_test_ack(&iter->async_tx))
                        list_move_tail(&iter->node, &mv_chan->free_slots);
        }
        return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
                   struct mv_xor_chan *mv_chan)
{
        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
                __func__, __LINE__, desc, desc->async_tx.flags);

        /* the client is allowed to attach dependent operations
         * until 'ack' is set
         */
        if (!async_tx_test_ack(&desc->async_tx))
                /* move this slot to the completed_slots */
                list_move_tail(&desc->node, &mv_chan->completed_slots);
        else
                list_move_tail(&desc->node, &mv_chan->free_slots);

        return 0;
}
/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
        struct mv_xor_desc_slot *iter, *_iter;
        dma_cookie_t cookie = 0;
        int busy = mv_chan_is_busy(mv_chan);
        u32 current_desc = mv_chan_get_current_desc(mv_chan);
        int current_cleaned = 0;
        struct mv_xor_desc *hw_desc;

        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
        dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
        mv_chan_clean_completed_slots(mv_chan);

        /* free completed slots from the chain starting with
         * the oldest descriptor
         */
        list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
                                 node) {

                /* clean finished descriptors */
                hw_desc = iter->hw_desc;
                if (hw_desc->status & XOR_DESC_SUCCESS) {
                        cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
                                                                 cookie);

                        /* done processing desc, clean slot */
                        mv_desc_clean_slot(iter, mv_chan);

                        /* break if we cleaned the current descriptor */
                        if (iter->async_tx.phys == current_desc) {
                                current_cleaned = 1;
                                break;
                        }
                } else {
                        if (iter->async_tx.phys == current_desc) {
                                current_cleaned = 0;
                                break;
                        }
                }
        }

        if ((busy == 0) && !list_empty(&mv_chan->chain)) {
                if (current_cleaned) {
                        /*
                         * current descriptor cleaned and removed, run
                         * from list head
                         */
                        iter = list_entry(mv_chan->chain.next,
                                          struct mv_xor_desc_slot,
                                          node);
                        mv_chan_start_new_chain(mv_chan, iter);
                } else {
                        if (!list_is_last(&iter->node, &mv_chan->chain)) {
                                /*
                                 * descriptors are still waiting after
                                 * current, trigger them
                                 */
                                iter = list_entry(iter->node.next,
                                                  struct mv_xor_desc_slot,
                                                  node);
                                mv_chan_start_new_chain(mv_chan, iter);
                        } else {
                                /*
                                 * some descriptors are still waiting
                                 * to be cleaned
                                 */
                                tasklet_schedule(&mv_chan->irq_tasklet);
                        }
                }
        }

        if (cookie > 0)
                mv_chan->dmachan.completed_cookie = cookie;
}
static void mv_xor_tasklet(unsigned long data)
{
        struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

        spin_lock_bh(&chan->lock);
        mv_chan_slot_cleanup(chan);
        spin_unlock_bh(&chan->lock);
}
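
/*
 * Software descriptor slots cycle through four lists: free_slots ->
 * allocated_slots (while the operation is being prepared) -> chain
 * (submitted to the hardware) -> completed_slots (done, waiting for the
 * client to ack) -> back to free_slots.
 */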
static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
        struct mv_xor_desc_slot *iter;

        spin_lock_bh(&mv_chan->lock);

        if (!list_empty(&mv_chan->free_slots)) {
                iter = list_first_entry(&mv_chan->free_slots,
                                        struct mv_xor_desc_slot,
                                        node);

                list_move_tail(&iter->node, &mv_chan->allocated_slots);

                spin_unlock_bh(&mv_chan->lock);

                /* pre-ack descriptor */
                async_tx_ack(&iter->async_tx);
                iter->async_tx.cookie = -EBUSY;

                return iter;
        }

        spin_unlock_bh(&mv_chan->lock);

        /* try to free some slots if the allocation fails */
        tasklet_schedule(&mv_chan->irq_tasklet);

        return NULL;
}
/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
        struct mv_xor_desc_slot *old_chain_tail;
        dma_cookie_t cookie;
        int new_hw_chain = 1;

        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s sw_desc %p: async_tx %p\n",
                __func__, sw_desc, &sw_desc->async_tx);

        spin_lock_bh(&mv_chan->lock);
        cookie = dma_cookie_assign(tx);

        if (list_empty(&mv_chan->chain))
                list_move_tail(&sw_desc->node, &mv_chan->chain);
        else {
                new_hw_chain = 0;

                old_chain_tail = list_entry(mv_chan->chain.prev,
                                            struct mv_xor_desc_slot,
                                            node);
                list_move_tail(&sw_desc->node, &mv_chan->chain);

                dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
                        &old_chain_tail->async_tx.phys);

                /* fix up the hardware chain */
                mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

                /* if the channel is not busy */
                if (!mv_chan_is_busy(mv_chan)) {
                        u32 current_desc = mv_chan_get_current_desc(mv_chan);
                        /*
                         * and the current desc is the end of the chain before
                         * the append, then we need to start the channel
                         */
                        if (current_desc == old_chain_tail->async_tx.phys)
                                new_hw_chain = 1;
                }
        }

        if (new_hw_chain)
                mv_chan_start_new_chain(mv_chan, sw_desc);

        spin_unlock_bh(&mv_chan->lock);

        return cookie;
}
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
        void *virt_desc;
        dma_addr_t dma_desc;
        int idx;
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *slot = NULL;
        int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

        /* Allocate descriptor slots */
        idx = mv_chan->slots_allocated;
        while (idx < num_descs_in_pool) {
                slot = kzalloc(sizeof(*slot), GFP_KERNEL);
                if (!slot) {
                        dev_info(mv_chan_to_devp(mv_chan),
                                 "channel only initialized %d descriptor slots",
                                 idx);
                        break;
                }
                virt_desc = mv_chan->dma_desc_pool_virt;
                slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

                dma_async_tx_descriptor_init(&slot->async_tx, chan);
                slot->async_tx.tx_submit = mv_xor_tx_submit;
                INIT_LIST_HEAD(&slot->node);
                dma_desc = mv_chan->dma_desc_pool;
                slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
                slot->idx = idx++;

                spin_lock_bh(&mv_chan->lock);
                mv_chan->slots_allocated = idx;
                list_add_tail(&slot->node, &mv_chan->free_slots);
                spin_unlock_bh(&mv_chan->lock);
        }

        dev_dbg(mv_chan_to_devp(mv_chan),
                "allocated %d descriptor slots\n",
                mv_chan->slots_allocated);

        return mv_chan->slots_allocated ? : -ENOMEM;
}
/*
 * Check if source or destination is a PCIe/IO address (non-SDRAM) and add
 * a new MBus window if necessary. Use a cache for these checks so that the
 * MMIO mapped registers don't have to be accessed every time, which speeds
 * up this process.
 */
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{
        struct mv_xor_device *xordev = mv_chan->xordev;
        void __iomem *base = mv_chan->mmr_high_base;
        u32 win_enable;
        u32 size;
        u8 target, attr;
        int ret;
        int i;

        /* Nothing needs to get done for the Armada 3700 */
        if (xordev->xor_type == XOR_ARMADA_37XX)
                return 0;

        /*
         * Loop over the cached windows to check, if the requested area
         * is already mapped. If this is the case, nothing needs to be done
         * and we can return.
         */
        for (i = 0; i < WINDOW_COUNT; i++) {
                if (addr >= xordev->win_start[i] &&
                    addr <= xordev->win_end[i]) {
                        /* Window is already mapped */
                        return 0;
                }
        }

        /*
         * The window is not mapped, so we need to create the new mapping
         */

        /* If no IO window is found, then addr has to be located in SDRAM */
        ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
        if (ret < 0)
                return 0;

        /*
         * Mask the base addr 'addr' according to 'size' read back from the
         * MBus window. Otherwise we might end up with an address located
         * somewhere in the middle of this area here.
         */
        size -= 1;
        addr &= ~size;

        /*
         * Reading one of the two enable registers is enough, as they are
         * always programmed to identical values.
         */
        win_enable = readl(base + WINDOW_BAR_ENABLE(0));

        /* Set 'i' to the first free window to write the new values to */
        i = ffs(~win_enable) - 1;
        if (i >= WINDOW_COUNT)
                return -ENOMEM;

        writel((addr & 0xffff0000) | (attr << 8) | target,
               base + WINDOW_BASE(i));
        writel(size & 0xffff0000, base + WINDOW_SIZE(i));

        /* Fill the caching variables for later use */
        xordev->win_start[i] = addr;
        xordev->win_end[i] = addr + size;
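
        /*
         * Enable the new window; the two per-window bits in the upper
         * halfword are programmed the same way as in
         * mv_xor_conf_mbus_windows() below.
         */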
        win_enable |= (1 << i);
        win_enable |= 3 << (16 + (2 * i));
        writel(win_enable, base + WINDOW_BAR_ENABLE(0));
        writel(win_enable, base + WINDOW_BAR_ENABLE(1));

        return 0;
}
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                    unsigned int src_cnt, size_t len, unsigned long flags)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *sw_desc;
        int ret;

        if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
                return NULL;

        BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
                __func__, src_cnt, len, &dest, flags);

        /* Check if a new window needs to get added for 'dest' */
        ret = mv_xor_add_io_win(mv_chan, dest);
        if (ret)
                return NULL;

        sw_desc = mv_chan_alloc_slot(mv_chan);
        if (sw_desc) {
                sw_desc->type = DMA_XOR;
                sw_desc->async_tx.flags = flags;
                mv_desc_init(sw_desc, dest, len, flags);
                if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
                        mv_desc_set_mode(sw_desc);
                while (src_cnt--) {
                        /* Check if a new window needs to get added for 'src' */
                        ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
                        if (ret)
                                return NULL;
                        mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
                }
        }

        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s sw_desc %p async_tx %p \n",
                __func__, sw_desc, &sw_desc->async_tx);
        return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                       size_t len, unsigned long flags)
{
        /*
         * A MEMCPY operation is identical to an XOR operation with only
         * a single source address.
         */
        return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        dma_addr_t src, dest;
        size_t len;

        src = mv_chan->dummy_src_addr;
        dest = mv_chan->dummy_dst_addr;
        len = MV_XOR_MIN_BYTE_COUNT;

        /*
         * We implement the DMA_INTERRUPT operation as a minimum sized
         * XOR operation with a single dummy source address.
         */
        return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
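
/*
 * For reference only (not part of this driver): clients typically reach the
 * prep routines above through the generic dmaengine API, roughly as in the
 * sketch below. Names such as my_callback are placeholders and error
 * handling is omitted.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dest, srcs, src_cnt,
 *					       len, DMA_PREP_INTERRUPT);
 *	tx->callback = my_callback;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */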
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *iter, *_iter;
        int in_use_descs = 0;

        spin_lock_bh(&mv_chan->lock);

        mv_chan_slot_cleanup(mv_chan);

        list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
                                 node) {
                in_use_descs++;
                list_move_tail(&iter->node, &mv_chan->free_slots);
        }
        list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
                                 node) {
                in_use_descs++;
                list_move_tail(&iter->node, &mv_chan->free_slots);
        }
        list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
                                 node) {
                in_use_descs++;
                list_move_tail(&iter->node, &mv_chan->free_slots);
        }
        list_for_each_entry_safe_reverse(
                iter, _iter, &mv_chan->free_slots, node) {
                list_del(&iter->node);
                kfree(iter);
                mv_chan->slots_allocated--;
        }

        dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
                __func__, mv_chan->slots_allocated);
        spin_unlock_bh(&mv_chan->lock);

        if (in_use_descs)
                dev_err(mv_chan_to_devp(mv_chan),
                        "freeing %d in use descriptors!\n", in_use_descs);
}
/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *txstate)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        spin_lock_bh(&mv_chan->lock);
        mv_chan_slot_cleanup(mv_chan);
        spin_unlock_bh(&mv_chan->lock);

        return dma_cookie_status(chan, cookie, txstate);
}
static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
        u32 val;

        val = readl_relaxed(XOR_CONFIG(chan));
        dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

        val = readl_relaxed(XOR_ACTIVATION(chan));
        dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

        val = readl_relaxed(XOR_INTR_CAUSE(chan));
        dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

        val = readl_relaxed(XOR_INTR_MASK(chan));
        dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

        val = readl_relaxed(XOR_ERROR_CAUSE(chan));
        dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

        val = readl_relaxed(XOR_ERROR_ADDR(chan));
        dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
                                          u32 intr_cause)
{
        if (intr_cause & XOR_INT_ERR_DECODE) {
                dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
                return;
        }

        dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
                chan->idx, intr_cause);

        mv_chan_dump_regs(chan);

        WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
        struct mv_xor_chan *chan = data;
        u32 intr_cause = mv_chan_get_intr_cause(chan);

        dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

        if (intr_cause & XOR_INTR_ERRORS)
                mv_chan_err_interrupt_handler(chan, intr_cause);

        tasklet_schedule(&chan->irq_tasklet);

        mv_chan_clear_eoc_cause(chan);

        return IRQ_HANDLED;
}
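
/*
 * Descriptors queued via mv_chan_start_new_chain() only bump the pending
 * count; the channel is actually (re)activated here, once at least
 * MV_XOR_THRESHOLD descriptors are pending.
 */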
static void mv_xor_issue_pending(struct dma_chan *chan)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

        if (mv_chan->pending >= MV_XOR_THRESHOLD) {
                mv_chan->pending = 0;
                mv_chan_activate(mv_chan);
        }
}
/*
 * Perform a transaction to verify the HW works.
 */
static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
        int i, ret;
        void *src, *dest;
        dma_addr_t src_dma, dest_dma;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_unmap_data *unmap;
        int err = 0;

        src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;

        dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < PAGE_SIZE; i++)
                ((u8 *) src)[i] = (u8)i;

        dma_chan = &mv_chan->dmachan;
        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
        if (!unmap) {
                err = -ENOMEM;
                goto free_resources;
        }

        src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
                               (size_t)src & ~PAGE_MASK, PAGE_SIZE,
                               DMA_TO_DEVICE);
        unmap->addr[0] = src_dma;

        ret = dma_mapping_error(dma_chan->device->dev, src_dma);
        if (ret) {
                err = -ENOMEM;
                goto free_resources;
        }
        unmap->to_cnt = 1;

        dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
                                (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
                                DMA_FROM_DEVICE);
        unmap->addr[1] = dest_dma;

        ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
        if (ret) {
                err = -ENOMEM;
                goto free_resources;
        }
        unmap->from_cnt = 1;
        unmap->len = PAGE_SIZE;

        tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
                                    PAGE_SIZE, 0);
        if (!tx) {
                dev_err(dma_chan->device->dev,
                        "Self-test cannot prepare operation, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        cookie = mv_xor_tx_submit(tx);
        if (dma_submit_error(cookie)) {
                dev_err(dma_chan->device->dev,
                        "Self-test submit error, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        mv_xor_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(1);

        if (mv_xor_status(dma_chan, cookie, NULL) !=
            DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
                                PAGE_SIZE, DMA_FROM_DEVICE);
        if (memcmp(src, dest, PAGE_SIZE)) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        dmaengine_unmap_put(unmap);
        mv_xor_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
        int i, src_idx, ret;
        struct page *dest;
        struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
        dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
        dma_addr_t dest_dma;
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_unmap_data *unmap;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u8 cmp_byte = 0;
        u32 cmp_word;
        int err = 0;
        int src_count = MV_XOR_NUM_SRC_TEST;

        for (src_idx = 0; src_idx < src_count; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
                if (!xor_srcs[src_idx]) {
                        while (src_idx--)
                                __free_page(xor_srcs[src_idx]);
                        return -ENOMEM;
                }
        }

        dest = alloc_page(GFP_KERNEL);
        if (!dest) {
                while (src_idx--)
                        __free_page(xor_srcs[src_idx]);
                return -ENOMEM;
        }

        /* Fill in src buffers */
        for (src_idx = 0; src_idx < src_count; src_idx++) {
                u8 *ptr = page_address(xor_srcs[src_idx]);
                for (i = 0; i < PAGE_SIZE; i++)
                        ptr[i] = (1 << src_idx);
        }

        for (src_idx = 0; src_idx < src_count; src_idx++)
                cmp_byte ^= (u8) (1 << src_idx);

        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
                (cmp_byte << 8) | cmp_byte;

        memset(page_address(dest), 0, PAGE_SIZE);

        dma_chan = &mv_chan->dmachan;
        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
                                         GFP_KERNEL);
        if (!unmap) {
                err = -ENOMEM;
                goto free_resources;
        }

        /* test xor */
        for (i = 0; i < src_count; i++) {
                unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
                                              0, PAGE_SIZE, DMA_TO_DEVICE);
                dma_srcs[i] = unmap->addr[i];
                ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
                if (ret) {
                        err = -ENOMEM;
                        goto free_resources;
                }
                unmap->to_cnt++;
        }

        unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
                                              DMA_FROM_DEVICE);
        dest_dma = unmap->addr[src_count];
        ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
        if (ret) {
                err = -ENOMEM;
                goto free_resources;
        }
        unmap->from_cnt = 1;
        unmap->len = PAGE_SIZE;

        tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                 src_count, PAGE_SIZE, 0);
        if (!tx) {
                dev_err(dma_chan->device->dev,
                        "Self-test cannot prepare operation, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        cookie = mv_xor_tx_submit(tx);
        if (dma_submit_error(cookie)) {
                dev_err(dma_chan->device->dev,
                        "Self-test submit error, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        mv_xor_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(8);

        if (mv_xor_status(dma_chan, cookie, NULL) !=
            DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test xor timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
                                PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
                u32 *ptr = page_address(dest);
                if (ptr[i] != cmp_word) {
                        dev_err(dma_chan->device->dev,
                                "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
                                i, ptr[i], cmp_word);
                        err = -ENODEV;
                        goto free_resources;
                }
        }

free_resources:
        dmaengine_unmap_put(unmap);
        mv_xor_free_chan_resources(dma_chan);
out:
        src_idx = src_count;
        while (src_idx--)
                __free_page(xor_srcs[src_idx]);
        __free_page(dest);
        return err;
}
static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
        struct dma_chan *chan, *_chan;
        struct device *dev = mv_chan->dmadev.dev;

        dma_async_device_unregister(&mv_chan->dmadev);

        dma_free_coherent(dev, MV_XOR_POOL_SIZE,
                          mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
        dma_unmap_single(dev, mv_chan->dummy_src_addr,
                         MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
        dma_unmap_single(dev, mv_chan->dummy_dst_addr,
                         MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

        list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
                                 device_node) {
                list_del(&chan->device_node);
        }

        free_irq(mv_chan->irq, mv_chan);

        return 0;
}
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
                   struct platform_device *pdev,
                   int idx, dma_cap_mask_t cap_mask, int irq)
{
        int ret = 0;
        struct mv_xor_chan *mv_chan;
        struct dma_device *dma_dev;

        mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
        if (!mv_chan)
                return ERR_PTR(-ENOMEM);

        mv_chan->idx = idx;
        mv_chan->irq = irq;
        if (xordev->xor_type == XOR_ORION)
                mv_chan->op_in_desc = XOR_MODE_IN_REG;
        else
                mv_chan->op_in_desc = XOR_MODE_IN_DESC;

        dma_dev = &mv_chan->dmadev;
        mv_chan->xordev = xordev;

        /*
         * These source and destination dummy buffers are used to implement
         * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
         * Hence, we only need to map the buffers at initialization time.
         */
        mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
                mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
        mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
                mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

        /* allocate coherent memory for hardware descriptors
         * note: writecombine gives slightly better performance, but
         * requires that we explicitly flush the writes
         */
        mv_chan->dma_desc_pool_virt =
          dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
                       GFP_KERNEL);
        if (!mv_chan->dma_desc_pool_virt)
                return ERR_PTR(-ENOMEM);

        /* discover transaction capabilities from the platform data */
        dma_dev->cap_mask = cap_mask;

        INIT_LIST_HEAD(&dma_dev->channels);

        /* set base routines */
        dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
        dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
        dma_dev->device_tx_status = mv_xor_status;
        dma_dev->device_issue_pending = mv_xor_issue_pending;
        dma_dev->dev = &pdev->dev;

        /* set prep routines based on capability */
        if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
                dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
                dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                dma_dev->max_xor = 8;
                dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
        }

        mv_chan->mmr_base = xordev->xor_base;
        mv_chan->mmr_high_base = xordev->xor_high_base;
        tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
                     mv_chan);

        /* clear errors before enabling interrupts */
        mv_chan_clear_err_status(mv_chan);

        ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
                          0, dev_name(&pdev->dev), mv_chan);
        if (ret)
                goto err_free_dma;

        mv_chan_unmask_interrupts(mv_chan);

        if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
                mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
        else
                mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

        spin_lock_init(&mv_chan->lock);
        INIT_LIST_HEAD(&mv_chan->chain);
        INIT_LIST_HEAD(&mv_chan->completed_slots);
        INIT_LIST_HEAD(&mv_chan->free_slots);
        INIT_LIST_HEAD(&mv_chan->allocated_slots);
        mv_chan->dmachan.device = dma_dev;
        dma_cookie_init(&mv_chan->dmachan);

        list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
                ret = mv_chan_memcpy_self_test(mv_chan);
                dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
                if (ret)
                        goto err_free_irq;
        }

        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                ret = mv_chan_xor_self_test(mv_chan);
                dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
                if (ret)
                        goto err_free_irq;
        }

        dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
                 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
                 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
                 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
                 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

        dma_async_device_register(dma_dev);
        return mv_chan;

err_free_irq:
        free_irq(mv_chan->irq, mv_chan);
err_free_dma:
        dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
                          mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
        return ERR_PTR(ret);
}
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
                         const struct mbus_dram_target_info *dram)
{
        void __iomem *base = xordev->xor_high_base;
        u32 win_enable = 0;
        int i;

        for (i = 0; i < 8; i++) {
                writel(0, base + WINDOW_BASE(i));
                writel(0, base + WINDOW_SIZE(i));
                if (i < 4)
                        writel(0, base + WINDOW_REMAP_HIGH(i));
        }

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;

                writel((cs->base & 0xffff0000) |
                       (cs->mbus_attr << 8) |
                       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
                writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

                /* Fill the caching variables for later use */
                xordev->win_start[i] = cs->base;
                xordev->win_end[i] = cs->base + cs->size - 1;

                win_enable |= (1 << i);
                win_enable |= 3 << (16 + (2 * i));
        }

        writel(win_enable, base + WINDOW_BAR_ENABLE(0));
        writel(win_enable, base + WINDOW_BAR_ENABLE(1));
        writel(0, base + WINDOW_OVERRIDE_CTRL(0));
        writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
static void
mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
{
        void __iomem *base = xordev->xor_high_base;
        u32 win_enable = 0;
        int i;

        for (i = 0; i < 8; i++) {
                writel(0, base + WINDOW_BASE(i));
                writel(0, base + WINDOW_SIZE(i));
                if (i < 4)
                        writel(0, base + WINDOW_REMAP_HIGH(i));
        }

        /*
         * For Armada3700, open a default 4GB MBus window. The DRAM-related
         * configuration is done at the AXIS level.
         */
        writel(0xffff0000, base + WINDOW_SIZE(0));
        win_enable |= 1;
        win_enable |= 3 << 16;

        writel(win_enable, base + WINDOW_BAR_ENABLE(0));
        writel(win_enable, base + WINDOW_BAR_ENABLE(1));
        writel(0, base + WINDOW_OVERRIDE_CTRL(0));
        writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
/*
 * Since this XOR driver is basically used only for RAID5, we don't
 * need to care about synchronizing ->suspend with DMA activity,
 * because the DMA engine will naturally be quiet due to the block
 * devices being suspended.
 */
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct mv_xor_device *xordev = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
                struct mv_xor_chan *mv_chan = xordev->channels[i];

                if (!mv_chan)
                        continue;

                mv_chan->saved_config_reg =
                        readl_relaxed(XOR_CONFIG(mv_chan));
                mv_chan->saved_int_mask_reg =
                        readl_relaxed(XOR_INTR_MASK(mv_chan));
        }

        return 0;
}
static int mv_xor_resume(struct platform_device *dev)
{
        struct mv_xor_device *xordev = platform_get_drvdata(dev);
        const struct mbus_dram_target_info *dram;
        int i;

        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
                struct mv_xor_chan *mv_chan = xordev->channels[i];

                if (!mv_chan)
                        continue;

                writel_relaxed(mv_chan->saved_config_reg,
                               XOR_CONFIG(mv_chan));
                writel_relaxed(mv_chan->saved_int_mask_reg,
                               XOR_INTR_MASK(mv_chan));
        }

        if (xordev->xor_type == XOR_ARMADA_37XX) {
                mv_xor_conf_mbus_windows_a3700(xordev);
                return 0;
        }

        dram = mv_mbus_dram_info();
        if (dram)
                mv_xor_conf_mbus_windows(xordev, dram);

        return 0;
}
static const struct of_device_id mv_xor_dt_ids[] = {
        { .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
        { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
        { .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
        {},
};
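
/*
 * For reference only: a device tree node matching one of the compatibles
 * above typically provides two register ranges and one child node with an
 * interrupt per channel, roughly like the illustrative sketch below (see
 * the marvell,orion-xor binding; addresses and interrupt numbers here are
 * made up):
 *
 *	xor@60800 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60800 0x100
 *		       0x60a00 0x100>;
 *
 *		xor00 {
 *			interrupts = <22>;
 *		};
 *		xor01 {
 *			interrupts = <23>;
 *		};
 *	};
 */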
static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{
        const struct mbus_dram_target_info *dram;
        struct mv_xor_device *xordev;
        struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct resource *res;
        unsigned int max_engines, max_channels;
        int i, ret;

        dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

        xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
        if (!xordev)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
                                        resource_size(res));
        if (!xordev->xor_base)
                return -EBUSY;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res)
                return -ENODEV;

        xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
                                             resource_size(res));
        if (!xordev->xor_high_base)
                return -EBUSY;

        platform_set_drvdata(pdev, xordev);

        /*
         * We need to know which type of XOR device we use before
         * setting up. In non-dt case it can only be the legacy one.
         */
        xordev->xor_type = XOR_ORION;
        if (pdev->dev.of_node) {
                const struct of_device_id *of_id =
                        of_match_device(mv_xor_dt_ids,
                                        &pdev->dev);

                xordev->xor_type = (uintptr_t)of_id->data;
        }

        /*
         * (Re-)program MBUS remapping windows if we are asked to.
         */
        if (xordev->xor_type == XOR_ARMADA_37XX) {
                mv_xor_conf_mbus_windows_a3700(xordev);
        } else {
                dram = mv_mbus_dram_info();
                if (dram)
                        mv_xor_conf_mbus_windows(xordev, dram);
        }
        /* Not all platforms can gate the clock, so it is not
         * an error if the clock does not exist.
         */
        xordev->clk = clk_get(&pdev->dev, NULL);
        if (!IS_ERR(xordev->clk))
                clk_prepare_enable(xordev->clk);

        /*
         * We don't want to have more than one channel per CPU in
         * order for async_tx to perform well. So we limit the number
         * of engines and channels so that we take into account this
         * constraint. Note that we also want to use channels from
         * separate engines when possible. For the dual-CPU Armada 3700
         * SoC, which has a single XOR engine, allow using both of its
         * channels.
         */
        max_engines = num_present_cpus();
        if (xordev->xor_type == XOR_ARMADA_37XX)
                max_channels = num_present_cpus();
        else
                max_channels = min_t(unsigned int,
                                     MV_XOR_MAX_CHANNELS,
                                     DIV_ROUND_UP(num_present_cpus(), 2));

        if (mv_xor_engine_count >= max_engines)
                return 0;
        if (pdev->dev.of_node) {
                struct device_node *np;
                int i = 0;

                for_each_child_of_node(pdev->dev.of_node, np) {
                        struct mv_xor_chan *chan;
                        dma_cap_mask_t cap_mask;
                        int irq;

                        if (i >= max_channels)
                                continue;

                        dma_cap_zero(cap_mask);
                        dma_cap_set(DMA_MEMCPY, cap_mask);
                        dma_cap_set(DMA_XOR, cap_mask);
                        dma_cap_set(DMA_INTERRUPT, cap_mask);

                        irq = irq_of_parse_and_map(np, 0);
                        if (!irq) {
                                ret = -ENODEV;
                                goto err_channel_add;
                        }

                        chan = mv_xor_channel_add(xordev, pdev, i,
                                                  cap_mask, irq);
                        if (IS_ERR(chan)) {
                                ret = PTR_ERR(chan);
                                irq_dispose_mapping(irq);
                                goto err_channel_add;
                        }

                        xordev->channels[i] = chan;
                        i++;
                }
        } else if (pdata && pdata->channels) {
                for (i = 0; i < max_channels; i++) {
                        struct mv_xor_channel_data *cd;
                        struct mv_xor_chan *chan;
                        int irq;

                        cd = &pdata->channels[i];
                        if (!cd) {
                                ret = -ENODEV;
                                goto err_channel_add;
                        }

                        irq = platform_get_irq(pdev, i);
                        if (irq < 0) {
                                ret = irq;
                                goto err_channel_add;
                        }

                        chan = mv_xor_channel_add(xordev, pdev, i,
                                                  cd->cap_mask, irq);
                        if (IS_ERR(chan)) {
                                ret = PTR_ERR(chan);
                                goto err_channel_add;
                        }

                        xordev->channels[i] = chan;
                }
        }

        return 0;

err_channel_add:
        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
                if (xordev->channels[i]) {
                        mv_xor_channel_remove(xordev->channels[i]);
                        if (pdev->dev.of_node)
                                irq_dispose_mapping(xordev->channels[i]->irq);
                }

        if (!IS_ERR(xordev->clk)) {
                clk_disable_unprepare(xordev->clk);
                clk_put(xordev->clk);
        }

        return ret;
}
static struct platform_driver mv_xor_driver = {
        .probe = mv_xor_probe,
        .suspend = mv_xor_suspend,
        .resume = mv_xor_resume,
        .driver = {
                .name = MV_XOR_NAME,
                .of_match_table = of_match_ptr(mv_xor_dt_ids),
        },
};

static int __init mv_xor_init(void)
{
        return platform_driver_register(&mv_xor_driver);
}
device_initcall(mv_xor_init);

/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/