bcm-sba-raid.c

/*
 * Copyright (C) 2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
 * Broadcom SBA RAID Driver
 *
 * The Broadcom stream buffer accelerator (SBA) provides offloading
 * capabilities for RAID operations. The SBA offload engine is accessible
 * via a Broadcom SoC specific ring manager. Two or more offload engines
 * can share the same ring manager, which is why the ring manager driver
 * is implemented as a mailbox controller driver and the offload engine
 * drivers are implemented as mailbox clients.
 *
 * Typically, the Broadcom SoC specific ring manager implements a large
 * number of hardware rings over one or more SBA hardware devices. By
 * design, the internal buffer size of an SBA hardware device is limited,
 * but every offload operation supported by SBA can be broken down into
 * multiple small requests and executed in parallel on multiple SBA
 * hardware devices to achieve high throughput.
 *
 * The Broadcom SBA RAID driver does not require any register programming
 * except submitting requests to the SBA hardware device via mailbox
 * channels. This driver implements a DMA device with one DMA channel
 * using a set of mailbox channels provided by the Broadcom SoC specific
 * ring manager driver. To exploit parallelism (as described above), all
 * DMA requests arriving on the SBA RAID DMA channel are broken down into
 * smaller requests and submitted to multiple mailbox channels in a
 * round-robin fashion. To get more SBA DMA channels, more SBA device
 * nodes can be created in the Broadcom SoC specific DTS based on the
 * number of hardware rings supported by the Broadcom SoC ring manager.
 */
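/*
 * Illustrative sketch (not from the original sources): a hypothetical
 * device tree fragment wiring one SBA RAID offload engine to two ring
 * manager mailbox channels. The node name, phandle, and mailbox cell
 * values are assumptions; only the compatible string and the use of
 * the "mboxes" property are taken from this driver.
 *
 *	raid0: raid@0 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&raidmbox 0>, <&raidmbox 1>;
 *	};
 *
 * Each "mboxes" entry becomes one mailbox channel behind this driver's
 * single DMA channel, and requests are spread across these mailbox
 * channels in round-robin fashion.
 */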
  36. #include <linux/bitops.h>
  37. #include <linux/debugfs.h>
  38. #include <linux/dma-mapping.h>
  39. #include <linux/dmaengine.h>
  40. #include <linux/list.h>
  41. #include <linux/mailbox_client.h>
  42. #include <linux/mailbox/brcm-message.h>
  43. #include <linux/module.h>
  44. #include <linux/of_device.h>
  45. #include <linux/slab.h>
  46. #include <linux/raid/pq.h>
  47. #include "dmaengine.h"
  48. /* ====== Driver macros and defines ===== */
  49. #define SBA_TYPE_SHIFT 48
  50. #define SBA_TYPE_MASK GENMASK(1, 0)
  51. #define SBA_TYPE_A 0x0
  52. #define SBA_TYPE_B 0x2
  53. #define SBA_TYPE_C 0x3
  54. #define SBA_USER_DEF_SHIFT 32
  55. #define SBA_USER_DEF_MASK GENMASK(15, 0)
  56. #define SBA_R_MDATA_SHIFT 24
  57. #define SBA_R_MDATA_MASK GENMASK(7, 0)
  58. #define SBA_C_MDATA_MS_SHIFT 18
  59. #define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
  60. #define SBA_INT_SHIFT 17
  61. #define SBA_INT_MASK BIT(0)
  62. #define SBA_RESP_SHIFT 16
  63. #define SBA_RESP_MASK BIT(0)
  64. #define SBA_C_MDATA_SHIFT 8
  65. #define SBA_C_MDATA_MASK GENMASK(7, 0)
  66. #define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
  67. #define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
  68. #define SBA_C_MDATA_DNUM_SHIFT 5
  69. #define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
  70. #define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
  71. #define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
  72. #define SBA_CMD_SHIFT 0
  73. #define SBA_CMD_MASK GENMASK(3, 0)
  74. #define SBA_CMD_ZERO_BUFFER 0x4
  75. #define SBA_CMD_ZERO_ALL_BUFFERS 0x8
  76. #define SBA_CMD_LOAD_BUFFER 0x9
  77. #define SBA_CMD_XOR 0xa
  78. #define SBA_CMD_GALOIS_XOR 0xb
  79. #define SBA_CMD_WRITE_BUFFER 0xc
  80. #define SBA_CMD_GALOIS 0xe
  81. #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
  82. /* Driver helper macros */
  83. #define to_sba_request(tx) \
  84. container_of(tx, struct sba_request, tx)
  85. #define to_sba_device(dchan) \
  86. container_of(dchan, struct sba_device, dma_chan)
  87. /* ===== Driver data structures ===== */
  88. enum sba_request_flags {
  89. SBA_REQUEST_STATE_FREE = 0x001,
  90. SBA_REQUEST_STATE_ALLOCED = 0x002,
  91. SBA_REQUEST_STATE_PENDING = 0x004,
  92. SBA_REQUEST_STATE_ACTIVE = 0x008,
  93. SBA_REQUEST_STATE_ABORTED = 0x010,
  94. SBA_REQUEST_STATE_MASK = 0x0ff,
  95. SBA_REQUEST_FENCE = 0x100,
  96. };
  97. struct sba_request {
  98. /* Global state */
  99. struct list_head node;
  100. struct sba_device *sba;
  101. u32 flags;
  102. /* Chained requests management */
  103. struct sba_request *first;
  104. struct list_head next;
  105. atomic_t next_pending_count;
  106. /* BRCM message data */
  107. struct brcm_message msg;
  108. struct dma_async_tx_descriptor tx;
  109. /* SBA commands */
  110. struct brcm_sba_command cmds[0];
  111. };
  112. enum sba_version {
  113. SBA_VER_1 = 0,
  114. SBA_VER_2
  115. };
  116. struct sba_device {
  117. /* Underlying device */
  118. struct device *dev;
  119. /* DT configuration parameters */
  120. enum sba_version ver;
  121. /* Derived configuration parameters */
  122. u32 max_req;
  123. u32 hw_buf_size;
  124. u32 hw_resp_size;
  125. u32 max_pq_coefs;
  126. u32 max_pq_srcs;
  127. u32 max_cmd_per_req;
  128. u32 max_xor_srcs;
  129. u32 max_resp_pool_size;
  130. u32 max_cmds_pool_size;
/* Mailbox client and mailbox channels */
  132. struct mbox_client client;
  133. int mchans_count;
  134. atomic_t mchans_current;
  135. struct mbox_chan **mchans;
  136. struct device *mbox_dev;
  137. /* DMA device and DMA channel */
  138. struct dma_device dma_dev;
  139. struct dma_chan dma_chan;
  140. /* DMA channel resources */
  141. void *resp_base;
  142. dma_addr_t resp_dma_base;
  143. void *cmds_base;
  144. dma_addr_t cmds_dma_base;
  145. spinlock_t reqs_lock;
  146. bool reqs_fence;
  147. struct list_head reqs_alloc_list;
  148. struct list_head reqs_pending_list;
  149. struct list_head reqs_active_list;
  150. struct list_head reqs_aborted_list;
  151. struct list_head reqs_free_list;
  152. /* DebugFS directory entries */
  153. struct dentry *root;
  154. struct dentry *stats;
  155. };
  156. /* ====== Command helper routines ===== */
  157. static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
  158. {
  159. cmd &= ~((u64)mask << shift);
  160. cmd |= ((u64)(val & mask) << shift);
  161. return cmd;
  162. }
  163. static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
  164. {
  165. return b0 & SBA_C_MDATA_BNUMx_MASK;
  166. }
  167. static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
  168. {
  169. return b0 & SBA_C_MDATA_BNUMx_MASK;
  170. }
  171. static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
  172. {
  173. return (b0 & SBA_C_MDATA_BNUMx_MASK) |
  174. ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
  175. }
  176. static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
  177. {
  178. return (b0 & SBA_C_MDATA_BNUMx_MASK) |
  179. ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
  180. ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
  181. }
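/*
 * Worked example (illustrative, not part of the original code): the
 * fill-up routines below build each 64-bit SBA command by masking a
 * field value and shifting it into position with sba_cmd_enc(). A
 * Type-B LOAD_BUFFER command that loads 'len' bytes into buf0 is
 * composed roughly as follows:
 *
 *	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 *	cmd = sba_cmd_enc(cmd, len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 *	c_mdata = sba_cmd_load_c_mdata(0);
 *	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 *			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
 *			  SBA_CMD_SHIFT, SBA_CMD_MASK);
 *
 * With the shifts and masks defined above, this places the command
 * type in bits [49:48], the transfer length in bits [47:32], the
 * buffer-selection metadata in bits [15:8], and the opcode in
 * bits [3:0].
 */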
  182. /* ====== General helper routines ===== */
  183. static void sba_peek_mchans(struct sba_device *sba)
  184. {
  185. int mchan_idx;
  186. for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
  187. mbox_client_peek_data(sba->mchans[mchan_idx]);
  188. }
  189. static struct sba_request *sba_alloc_request(struct sba_device *sba)
  190. {
  191. bool found = false;
  192. unsigned long flags;
  193. struct sba_request *req = NULL;
  194. spin_lock_irqsave(&sba->reqs_lock, flags);
  195. list_for_each_entry(req, &sba->reqs_free_list, node) {
  196. if (async_tx_test_ack(&req->tx)) {
  197. list_move_tail(&req->node, &sba->reqs_alloc_list);
  198. found = true;
  199. break;
  200. }
  201. }
  202. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  203. if (!found) {
/*
 * We have no more free requests, so we peek the
 * mailbox channels hoping that a few active requests
 * have completed, which would create more room for
 * new requests.
 */
  210. sba_peek_mchans(sba);
  211. return NULL;
  212. }
  213. req->flags = SBA_REQUEST_STATE_ALLOCED;
  214. req->first = req;
  215. INIT_LIST_HEAD(&req->next);
  216. atomic_set(&req->next_pending_count, 1);
  217. dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
  218. async_tx_ack(&req->tx);
  219. return req;
  220. }
  221. /* Note: Must be called with sba->reqs_lock held */
  222. static void _sba_pending_request(struct sba_device *sba,
  223. struct sba_request *req)
  224. {
  225. lockdep_assert_held(&sba->reqs_lock);
  226. req->flags &= ~SBA_REQUEST_STATE_MASK;
  227. req->flags |= SBA_REQUEST_STATE_PENDING;
  228. list_move_tail(&req->node, &sba->reqs_pending_list);
  229. if (list_empty(&sba->reqs_active_list))
  230. sba->reqs_fence = false;
  231. }
  232. /* Note: Must be called with sba->reqs_lock held */
  233. static bool _sba_active_request(struct sba_device *sba,
  234. struct sba_request *req)
  235. {
  236. lockdep_assert_held(&sba->reqs_lock);
  237. if (list_empty(&sba->reqs_active_list))
  238. sba->reqs_fence = false;
  239. if (sba->reqs_fence)
  240. return false;
  241. req->flags &= ~SBA_REQUEST_STATE_MASK;
  242. req->flags |= SBA_REQUEST_STATE_ACTIVE;
  243. list_move_tail(&req->node, &sba->reqs_active_list);
  244. if (req->flags & SBA_REQUEST_FENCE)
  245. sba->reqs_fence = true;
  246. return true;
  247. }
  248. /* Note: Must be called with sba->reqs_lock held */
  249. static void _sba_abort_request(struct sba_device *sba,
  250. struct sba_request *req)
  251. {
  252. lockdep_assert_held(&sba->reqs_lock);
  253. req->flags &= ~SBA_REQUEST_STATE_MASK;
  254. req->flags |= SBA_REQUEST_STATE_ABORTED;
  255. list_move_tail(&req->node, &sba->reqs_aborted_list);
  256. if (list_empty(&sba->reqs_active_list))
  257. sba->reqs_fence = false;
  258. }
  259. /* Note: Must be called with sba->reqs_lock held */
  260. static void _sba_free_request(struct sba_device *sba,
  261. struct sba_request *req)
  262. {
  263. lockdep_assert_held(&sba->reqs_lock);
  264. req->flags &= ~SBA_REQUEST_STATE_MASK;
  265. req->flags |= SBA_REQUEST_STATE_FREE;
  266. list_move_tail(&req->node, &sba->reqs_free_list);
  267. if (list_empty(&sba->reqs_active_list))
  268. sba->reqs_fence = false;
  269. }
  270. static void sba_free_chained_requests(struct sba_request *req)
  271. {
  272. unsigned long flags;
  273. struct sba_request *nreq;
  274. struct sba_device *sba = req->sba;
  275. spin_lock_irqsave(&sba->reqs_lock, flags);
  276. _sba_free_request(sba, req);
  277. list_for_each_entry(nreq, &req->next, next)
  278. _sba_free_request(sba, nreq);
  279. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  280. }
  281. static void sba_chain_request(struct sba_request *first,
  282. struct sba_request *req)
  283. {
  284. unsigned long flags;
  285. struct sba_device *sba = req->sba;
  286. spin_lock_irqsave(&sba->reqs_lock, flags);
  287. list_add_tail(&req->next, &first->next);
  288. req->first = first;
  289. atomic_inc(&first->next_pending_count);
  290. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  291. }
  292. static void sba_cleanup_nonpending_requests(struct sba_device *sba)
  293. {
  294. unsigned long flags;
  295. struct sba_request *req, *req1;
  296. spin_lock_irqsave(&sba->reqs_lock, flags);
/* Free up all allocated requests */
  298. list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
  299. _sba_free_request(sba, req);
  300. /* Set all active requests as aborted */
  301. list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
  302. _sba_abort_request(sba, req);
/*
 * Note: We expect that aborted requests will eventually be
 * freed by sba_receive_message()
 */
  307. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  308. }
  309. static void sba_cleanup_pending_requests(struct sba_device *sba)
  310. {
  311. unsigned long flags;
  312. struct sba_request *req, *req1;
  313. spin_lock_irqsave(&sba->reqs_lock, flags);
/* Free up all pending requests */
  315. list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
  316. _sba_free_request(sba, req);
  317. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  318. }
  319. static int sba_send_mbox_request(struct sba_device *sba,
  320. struct sba_request *req)
  321. {
  322. int mchans_idx, ret = 0;
  323. /* Select mailbox channel in round-robin fashion */
  324. mchans_idx = atomic_inc_return(&sba->mchans_current);
  325. mchans_idx = mchans_idx % sba->mchans_count;
  326. /* Send message for the request */
  327. req->msg.error = 0;
  328. ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
  329. if (ret < 0) {
  330. dev_err(sba->dev, "send message failed with error %d", ret);
  331. return ret;
  332. }
  333. /* Check error returned by mailbox controller */
  334. ret = req->msg.error;
  335. if (ret < 0) {
  336. dev_err(sba->dev, "message error %d", ret);
  337. }
  338. /* Signal txdone for mailbox channel */
  339. mbox_client_txdone(sba->mchans[mchans_idx], ret);
  340. return ret;
  341. }
  342. /* Note: Must be called with sba->reqs_lock held */
  343. static void _sba_process_pending_requests(struct sba_device *sba)
  344. {
  345. int ret;
  346. u32 count;
  347. struct sba_request *req;
/*
 * Process a few pending requests
 *
 * For now, we process (<number_of_mailbox_channels> * 8)
 * requests at a time.
 */
  354. count = sba->mchans_count * 8;
  355. while (!list_empty(&sba->reqs_pending_list) && count) {
  356. /* Get the first pending request */
  357. req = list_first_entry(&sba->reqs_pending_list,
  358. struct sba_request, node);
  359. /* Try to make request active */
  360. if (!_sba_active_request(sba, req))
  361. break;
  362. /* Send request to mailbox channel */
  363. ret = sba_send_mbox_request(sba, req);
  364. if (ret < 0) {
  365. _sba_pending_request(sba, req);
  366. break;
  367. }
  368. count--;
  369. }
  370. }
  371. static void sba_process_received_request(struct sba_device *sba,
  372. struct sba_request *req)
  373. {
  374. unsigned long flags;
  375. struct dma_async_tx_descriptor *tx;
  376. struct sba_request *nreq, *first = req->first;
  377. /* Process only after all chained requests are received */
  378. if (!atomic_dec_return(&first->next_pending_count)) {
  379. tx = &first->tx;
  380. WARN_ON(tx->cookie < 0);
  381. if (tx->cookie > 0) {
  382. dma_cookie_complete(tx);
  383. dmaengine_desc_get_callback_invoke(tx, NULL);
  384. dma_descriptor_unmap(tx);
  385. tx->callback = NULL;
  386. tx->callback_result = NULL;
  387. }
  388. dma_run_dependencies(tx);
  389. spin_lock_irqsave(&sba->reqs_lock, flags);
  390. /* Free all requests chained to first request */
  391. list_for_each_entry(nreq, &first->next, next)
  392. _sba_free_request(sba, nreq);
  393. INIT_LIST_HEAD(&first->next);
  394. /* Free the first request */
  395. _sba_free_request(sba, first);
  396. /* Process pending requests */
  397. _sba_process_pending_requests(sba);
  398. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  399. }
  400. }
  401. static void sba_write_stats_in_seqfile(struct sba_device *sba,
  402. struct seq_file *file)
  403. {
  404. unsigned long flags;
  405. struct sba_request *req;
  406. u32 free_count = 0, alloced_count = 0;
  407. u32 pending_count = 0, active_count = 0, aborted_count = 0;
  408. spin_lock_irqsave(&sba->reqs_lock, flags);
  409. list_for_each_entry(req, &sba->reqs_free_list, node)
  410. if (async_tx_test_ack(&req->tx))
  411. free_count++;
  412. list_for_each_entry(req, &sba->reqs_alloc_list, node)
  413. alloced_count++;
  414. list_for_each_entry(req, &sba->reqs_pending_list, node)
  415. pending_count++;
  416. list_for_each_entry(req, &sba->reqs_active_list, node)
  417. active_count++;
  418. list_for_each_entry(req, &sba->reqs_aborted_list, node)
  419. aborted_count++;
  420. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  421. seq_printf(file, "maximum requests = %d\n", sba->max_req);
  422. seq_printf(file, "free requests = %d\n", free_count);
  423. seq_printf(file, "alloced requests = %d\n", alloced_count);
  424. seq_printf(file, "pending requests = %d\n", pending_count);
  425. seq_printf(file, "active requests = %d\n", active_count);
  426. seq_printf(file, "aborted requests = %d\n", aborted_count);
  427. }
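/*
 * For reference, the stats output written by the routine above looks
 * roughly like this in debugfs (the numbers are made up for
 * illustration):
 *
 *	maximum requests = 1024
 *	free requests = 1022
 *	alloced requests = 0
 *	pending requests = 2
 *	active requests = 0
 *	aborted requests = 0
 */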
  428. /* ====== DMAENGINE callbacks ===== */
  429. static void sba_free_chan_resources(struct dma_chan *dchan)
  430. {
/*
 * Channel resources are pre-allocated, so we just free up
 * whatever we can so that the pre-allocated channel
 * resources can be re-used next time.
 */
  436. sba_cleanup_nonpending_requests(to_sba_device(dchan));
  437. }
  438. static int sba_device_terminate_all(struct dma_chan *dchan)
  439. {
  440. /* Cleanup all pending requests */
  441. sba_cleanup_pending_requests(to_sba_device(dchan));
  442. return 0;
  443. }
  444. static void sba_issue_pending(struct dma_chan *dchan)
  445. {
  446. unsigned long flags;
  447. struct sba_device *sba = to_sba_device(dchan);
  448. /* Process pending requests */
  449. spin_lock_irqsave(&sba->reqs_lock, flags);
  450. _sba_process_pending_requests(sba);
  451. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  452. }
  453. static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
  454. {
  455. unsigned long flags;
  456. dma_cookie_t cookie;
  457. struct sba_device *sba;
  458. struct sba_request *req, *nreq;
  459. if (unlikely(!tx))
  460. return -EINVAL;
  461. sba = to_sba_device(tx->chan);
  462. req = to_sba_request(tx);
  463. /* Assign cookie and mark all chained requests pending */
  464. spin_lock_irqsave(&sba->reqs_lock, flags);
  465. cookie = dma_cookie_assign(tx);
  466. _sba_pending_request(sba, req);
  467. list_for_each_entry(nreq, &req->next, next)
  468. _sba_pending_request(sba, nreq);
  469. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  470. return cookie;
  471. }
  472. static enum dma_status sba_tx_status(struct dma_chan *dchan,
  473. dma_cookie_t cookie,
  474. struct dma_tx_state *txstate)
  475. {
  476. enum dma_status ret;
  477. struct sba_device *sba = to_sba_device(dchan);
  478. ret = dma_cookie_status(dchan, cookie, txstate);
  479. if (ret == DMA_COMPLETE)
  480. return ret;
  481. sba_peek_mchans(sba);
  482. return dma_cookie_status(dchan, cookie, txstate);
  483. }
  484. static void sba_fillup_interrupt_msg(struct sba_request *req,
  485. struct brcm_sba_command *cmds,
  486. struct brcm_message *msg)
  487. {
  488. u64 cmd;
  489. u32 c_mdata;
  490. dma_addr_t resp_dma = req->tx.phys;
  491. struct brcm_sba_command *cmdsp = cmds;
  492. /* Type-B command to load dummy data into buf0 */
  493. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  494. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  495. cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
  496. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  497. c_mdata = sba_cmd_load_c_mdata(0);
  498. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  499. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  500. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  501. SBA_CMD_SHIFT, SBA_CMD_MASK);
  502. cmdsp->cmd = cmd;
  503. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  504. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  505. cmdsp->data = resp_dma;
  506. cmdsp->data_len = req->sba->hw_resp_size;
  507. cmdsp++;
  508. /* Type-A command to write buf0 to dummy location */
  509. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  510. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  511. cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
  512. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  513. cmd = sba_cmd_enc(cmd, 0x1,
  514. SBA_RESP_SHIFT, SBA_RESP_MASK);
  515. c_mdata = sba_cmd_write_c_mdata(0);
  516. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  517. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  518. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  519. SBA_CMD_SHIFT, SBA_CMD_MASK);
  520. cmdsp->cmd = cmd;
  521. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  522. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  523. if (req->sba->hw_resp_size) {
  524. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  525. cmdsp->resp = resp_dma;
  526. cmdsp->resp_len = req->sba->hw_resp_size;
  527. }
  528. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  529. cmdsp->data = resp_dma;
  530. cmdsp->data_len = req->sba->hw_resp_size;
  531. cmdsp++;
  532. /* Fillup brcm_message */
  533. msg->type = BRCM_MESSAGE_SBA;
  534. msg->sba.cmds = cmds;
  535. msg->sba.cmds_count = cmdsp - cmds;
  536. msg->ctx = req;
  537. msg->error = 0;
  538. }
  539. static struct dma_async_tx_descriptor *
  540. sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
  541. {
  542. struct sba_request *req = NULL;
  543. struct sba_device *sba = to_sba_device(dchan);
  544. /* Alloc new request */
  545. req = sba_alloc_request(sba);
  546. if (!req)
  547. return NULL;
  548. /*
  549. * Force fence so that no requests are submitted
  550. * until DMA callback for this request is invoked.
  551. */
  552. req->flags |= SBA_REQUEST_FENCE;
  553. /* Fillup request message */
  554. sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
  555. /* Init async_tx descriptor */
  556. req->tx.flags = flags;
  557. req->tx.cookie = -EBUSY;
  558. return &req->tx;
  559. }
  560. static void sba_fillup_memcpy_msg(struct sba_request *req,
  561. struct brcm_sba_command *cmds,
  562. struct brcm_message *msg,
  563. dma_addr_t msg_offset, size_t msg_len,
  564. dma_addr_t dst, dma_addr_t src)
  565. {
  566. u64 cmd;
  567. u32 c_mdata;
  568. dma_addr_t resp_dma = req->tx.phys;
  569. struct brcm_sba_command *cmdsp = cmds;
  570. /* Type-B command to load data into buf0 */
  571. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  572. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  573. cmd = sba_cmd_enc(cmd, msg_len,
  574. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  575. c_mdata = sba_cmd_load_c_mdata(0);
  576. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  577. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  578. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  579. SBA_CMD_SHIFT, SBA_CMD_MASK);
  580. cmdsp->cmd = cmd;
  581. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  582. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  583. cmdsp->data = src + msg_offset;
  584. cmdsp->data_len = msg_len;
  585. cmdsp++;
  586. /* Type-A command to write buf0 */
  587. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  588. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  589. cmd = sba_cmd_enc(cmd, msg_len,
  590. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  591. cmd = sba_cmd_enc(cmd, 0x1,
  592. SBA_RESP_SHIFT, SBA_RESP_MASK);
  593. c_mdata = sba_cmd_write_c_mdata(0);
  594. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  595. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  596. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  597. SBA_CMD_SHIFT, SBA_CMD_MASK);
  598. cmdsp->cmd = cmd;
  599. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  600. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  601. if (req->sba->hw_resp_size) {
  602. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  603. cmdsp->resp = resp_dma;
  604. cmdsp->resp_len = req->sba->hw_resp_size;
  605. }
  606. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  607. cmdsp->data = dst + msg_offset;
  608. cmdsp->data_len = msg_len;
  609. cmdsp++;
  610. /* Fillup brcm_message */
  611. msg->type = BRCM_MESSAGE_SBA;
  612. msg->sba.cmds = cmds;
  613. msg->sba.cmds_count = cmdsp - cmds;
  614. msg->ctx = req;
  615. msg->error = 0;
  616. }
  617. static struct sba_request *
  618. sba_prep_dma_memcpy_req(struct sba_device *sba,
  619. dma_addr_t off, dma_addr_t dst, dma_addr_t src,
  620. size_t len, unsigned long flags)
  621. {
  622. struct sba_request *req = NULL;
  623. /* Alloc new request */
  624. req = sba_alloc_request(sba);
  625. if (!req)
  626. return NULL;
  627. if (flags & DMA_PREP_FENCE)
  628. req->flags |= SBA_REQUEST_FENCE;
  629. /* Fillup request message */
  630. sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
  631. off, len, dst, src);
  632. /* Init async_tx descriptor */
  633. req->tx.flags = flags;
  634. req->tx.cookie = -EBUSY;
  635. return req;
  636. }
  637. static struct dma_async_tx_descriptor *
  638. sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
  639. size_t len, unsigned long flags)
  640. {
  641. size_t req_len;
  642. dma_addr_t off = 0;
  643. struct sba_device *sba = to_sba_device(dchan);
  644. struct sba_request *first = NULL, *req;
/* Create chained requests where each request is up to hw_buf_size */
  646. while (len) {
  647. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  648. req = sba_prep_dma_memcpy_req(sba, off, dst, src,
  649. req_len, flags);
  650. if (!req) {
  651. if (first)
  652. sba_free_chained_requests(first);
  653. return NULL;
  654. }
  655. if (first)
  656. sba_chain_request(first, req);
  657. else
  658. first = req;
  659. off += req_len;
  660. len -= req_len;
  661. }
  662. return (first) ? &first->tx : NULL;
  663. }
  664. static void sba_fillup_xor_msg(struct sba_request *req,
  665. struct brcm_sba_command *cmds,
  666. struct brcm_message *msg,
  667. dma_addr_t msg_offset, size_t msg_len,
  668. dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
  669. {
  670. u64 cmd;
  671. u32 c_mdata;
  672. unsigned int i;
  673. dma_addr_t resp_dma = req->tx.phys;
  674. struct brcm_sba_command *cmdsp = cmds;
  675. /* Type-B command to load data into buf0 */
  676. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  677. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  678. cmd = sba_cmd_enc(cmd, msg_len,
  679. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  680. c_mdata = sba_cmd_load_c_mdata(0);
  681. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  682. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  683. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  684. SBA_CMD_SHIFT, SBA_CMD_MASK);
  685. cmdsp->cmd = cmd;
  686. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  687. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  688. cmdsp->data = src[0] + msg_offset;
  689. cmdsp->data_len = msg_len;
  690. cmdsp++;
  691. /* Type-B commands to xor data with buf0 and put it back in buf0 */
  692. for (i = 1; i < src_cnt; i++) {
  693. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  694. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  695. cmd = sba_cmd_enc(cmd, msg_len,
  696. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  697. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  698. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  699. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  700. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  701. SBA_CMD_SHIFT, SBA_CMD_MASK);
  702. cmdsp->cmd = cmd;
  703. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  704. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  705. cmdsp->data = src[i] + msg_offset;
  706. cmdsp->data_len = msg_len;
  707. cmdsp++;
  708. }
  709. /* Type-A command to write buf0 */
  710. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  711. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  712. cmd = sba_cmd_enc(cmd, msg_len,
  713. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  714. cmd = sba_cmd_enc(cmd, 0x1,
  715. SBA_RESP_SHIFT, SBA_RESP_MASK);
  716. c_mdata = sba_cmd_write_c_mdata(0);
  717. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  718. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  719. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  720. SBA_CMD_SHIFT, SBA_CMD_MASK);
  721. cmdsp->cmd = cmd;
  722. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  723. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  724. if (req->sba->hw_resp_size) {
  725. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  726. cmdsp->resp = resp_dma;
  727. cmdsp->resp_len = req->sba->hw_resp_size;
  728. }
  729. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  730. cmdsp->data = dst + msg_offset;
  731. cmdsp->data_len = msg_len;
  732. cmdsp++;
  733. /* Fillup brcm_message */
  734. msg->type = BRCM_MESSAGE_SBA;
  735. msg->sba.cmds = cmds;
  736. msg->sba.cmds_count = cmdsp - cmds;
  737. msg->ctx = req;
  738. msg->error = 0;
  739. }
  740. static struct sba_request *
  741. sba_prep_dma_xor_req(struct sba_device *sba,
  742. dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
  743. u32 src_cnt, size_t len, unsigned long flags)
  744. {
  745. struct sba_request *req = NULL;
  746. /* Alloc new request */
  747. req = sba_alloc_request(sba);
  748. if (!req)
  749. return NULL;
  750. if (flags & DMA_PREP_FENCE)
  751. req->flags |= SBA_REQUEST_FENCE;
  752. /* Fillup request message */
  753. sba_fillup_xor_msg(req, req->cmds, &req->msg,
  754. off, len, dst, src, src_cnt);
  755. /* Init async_tx descriptor */
  756. req->tx.flags = flags;
  757. req->tx.cookie = -EBUSY;
  758. return req;
  759. }
  760. static struct dma_async_tx_descriptor *
  761. sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
  762. u32 src_cnt, size_t len, unsigned long flags)
  763. {
  764. size_t req_len;
  765. dma_addr_t off = 0;
  766. struct sba_device *sba = to_sba_device(dchan);
  767. struct sba_request *first = NULL, *req;
  768. /* Sanity checks */
  769. if (unlikely(src_cnt > sba->max_xor_srcs))
  770. return NULL;
/* Create chained requests where each request is up to hw_buf_size */
  772. while (len) {
  773. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  774. req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
  775. req_len, flags);
  776. if (!req) {
  777. if (first)
  778. sba_free_chained_requests(first);
  779. return NULL;
  780. }
  781. if (first)
  782. sba_chain_request(first, req);
  783. else
  784. first = req;
  785. off += req_len;
  786. len -= req_len;
  787. }
  788. return (first) ? &first->tx : NULL;
  789. }
  790. static void sba_fillup_pq_msg(struct sba_request *req,
  791. bool pq_continue,
  792. struct brcm_sba_command *cmds,
  793. struct brcm_message *msg,
  794. dma_addr_t msg_offset, size_t msg_len,
  795. dma_addr_t *dst_p, dma_addr_t *dst_q,
  796. const u8 *scf, dma_addr_t *src, u32 src_cnt)
  797. {
  798. u64 cmd;
  799. u32 c_mdata;
  800. unsigned int i;
  801. dma_addr_t resp_dma = req->tx.phys;
  802. struct brcm_sba_command *cmdsp = cmds;
  803. if (pq_continue) {
  804. /* Type-B command to load old P into buf0 */
  805. if (dst_p) {
  806. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  807. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  808. cmd = sba_cmd_enc(cmd, msg_len,
  809. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  810. c_mdata = sba_cmd_load_c_mdata(0);
  811. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  812. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  813. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  814. SBA_CMD_SHIFT, SBA_CMD_MASK);
  815. cmdsp->cmd = cmd;
  816. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  817. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  818. cmdsp->data = *dst_p + msg_offset;
  819. cmdsp->data_len = msg_len;
  820. cmdsp++;
  821. }
  822. /* Type-B command to load old Q into buf1 */
  823. if (dst_q) {
  824. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  825. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  826. cmd = sba_cmd_enc(cmd, msg_len,
  827. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  828. c_mdata = sba_cmd_load_c_mdata(1);
  829. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  830. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  831. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  832. SBA_CMD_SHIFT, SBA_CMD_MASK);
  833. cmdsp->cmd = cmd;
  834. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  835. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  836. cmdsp->data = *dst_q + msg_offset;
  837. cmdsp->data_len = msg_len;
  838. cmdsp++;
  839. }
  840. } else {
  841. /* Type-A command to zero all buffers */
  842. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  843. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  844. cmd = sba_cmd_enc(cmd, msg_len,
  845. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  846. cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
  847. SBA_CMD_SHIFT, SBA_CMD_MASK);
  848. cmdsp->cmd = cmd;
  849. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  850. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  851. cmdsp++;
  852. }
/* Type-B commands to generate P into buf0 and Q into buf1 */
  854. for (i = 0; i < src_cnt; i++) {
  855. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  856. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  857. cmd = sba_cmd_enc(cmd, msg_len,
  858. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  859. c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
  860. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  861. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  862. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  863. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  864. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
  865. SBA_CMD_SHIFT, SBA_CMD_MASK);
  866. cmdsp->cmd = cmd;
  867. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  868. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  869. cmdsp->data = src[i] + msg_offset;
  870. cmdsp->data_len = msg_len;
  871. cmdsp++;
  872. }
  873. /* Type-A command to write buf0 */
  874. if (dst_p) {
  875. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  876. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  877. cmd = sba_cmd_enc(cmd, msg_len,
  878. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  879. cmd = sba_cmd_enc(cmd, 0x1,
  880. SBA_RESP_SHIFT, SBA_RESP_MASK);
  881. c_mdata = sba_cmd_write_c_mdata(0);
  882. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  883. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  884. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  885. SBA_CMD_SHIFT, SBA_CMD_MASK);
  886. cmdsp->cmd = cmd;
  887. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  888. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  889. if (req->sba->hw_resp_size) {
  890. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  891. cmdsp->resp = resp_dma;
  892. cmdsp->resp_len = req->sba->hw_resp_size;
  893. }
  894. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  895. cmdsp->data = *dst_p + msg_offset;
  896. cmdsp->data_len = msg_len;
  897. cmdsp++;
  898. }
  899. /* Type-A command to write buf1 */
  900. if (dst_q) {
  901. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  902. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  903. cmd = sba_cmd_enc(cmd, msg_len,
  904. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  905. cmd = sba_cmd_enc(cmd, 0x1,
  906. SBA_RESP_SHIFT, SBA_RESP_MASK);
  907. c_mdata = sba_cmd_write_c_mdata(1);
  908. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  909. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  910. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  911. SBA_CMD_SHIFT, SBA_CMD_MASK);
  912. cmdsp->cmd = cmd;
  913. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  914. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  915. if (req->sba->hw_resp_size) {
  916. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  917. cmdsp->resp = resp_dma;
  918. cmdsp->resp_len = req->sba->hw_resp_size;
  919. }
  920. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  921. cmdsp->data = *dst_q + msg_offset;
  922. cmdsp->data_len = msg_len;
  923. cmdsp++;
  924. }
  925. /* Fillup brcm_message */
  926. msg->type = BRCM_MESSAGE_SBA;
  927. msg->sba.cmds = cmds;
  928. msg->sba.cmds_count = cmdsp - cmds;
  929. msg->ctx = req;
  930. msg->error = 0;
  931. }
  932. static struct sba_request *
  933. sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
  934. dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
  935. u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
  936. {
  937. struct sba_request *req = NULL;
  938. /* Alloc new request */
  939. req = sba_alloc_request(sba);
  940. if (!req)
  941. return NULL;
  942. if (flags & DMA_PREP_FENCE)
  943. req->flags |= SBA_REQUEST_FENCE;
  944. /* Fillup request messages */
  945. sba_fillup_pq_msg(req, dmaf_continue(flags),
  946. req->cmds, &req->msg,
  947. off, len, dst_p, dst_q, scf, src, src_cnt);
  948. /* Init async_tx descriptor */
  949. req->tx.flags = flags;
  950. req->tx.cookie = -EBUSY;
  951. return req;
  952. }
  953. static void sba_fillup_pq_single_msg(struct sba_request *req,
  954. bool pq_continue,
  955. struct brcm_sba_command *cmds,
  956. struct brcm_message *msg,
  957. dma_addr_t msg_offset, size_t msg_len,
  958. dma_addr_t *dst_p, dma_addr_t *dst_q,
  959. dma_addr_t src, u8 scf)
  960. {
  961. u64 cmd;
  962. u32 c_mdata;
  963. u8 pos, dpos = raid6_gflog[scf];
  964. dma_addr_t resp_dma = req->tx.phys;
  965. struct brcm_sba_command *cmdsp = cmds;
  966. if (!dst_p)
  967. goto skip_p;
  968. if (pq_continue) {
  969. /* Type-B command to load old P into buf0 */
  970. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  971. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  972. cmd = sba_cmd_enc(cmd, msg_len,
  973. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  974. c_mdata = sba_cmd_load_c_mdata(0);
  975. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  976. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  977. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  978. SBA_CMD_SHIFT, SBA_CMD_MASK);
  979. cmdsp->cmd = cmd;
  980. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  981. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  982. cmdsp->data = *dst_p + msg_offset;
  983. cmdsp->data_len = msg_len;
  984. cmdsp++;
  985. /*
  986. * Type-B commands to xor data with buf0 and put it
  987. * back in buf0
  988. */
  989. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  990. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  991. cmd = sba_cmd_enc(cmd, msg_len,
  992. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  993. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  994. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  995. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  996. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  997. SBA_CMD_SHIFT, SBA_CMD_MASK);
  998. cmdsp->cmd = cmd;
  999. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1000. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1001. cmdsp->data = src + msg_offset;
  1002. cmdsp->data_len = msg_len;
  1003. cmdsp++;
  1004. } else {
  1005. /* Type-B command to load old P into buf0 */
  1006. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  1007. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1008. cmd = sba_cmd_enc(cmd, msg_len,
  1009. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1010. c_mdata = sba_cmd_load_c_mdata(0);
  1011. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1012. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1013. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  1014. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1015. cmdsp->cmd = cmd;
  1016. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1017. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1018. cmdsp->data = src + msg_offset;
  1019. cmdsp->data_len = msg_len;
  1020. cmdsp++;
  1021. }
  1022. /* Type-A command to write buf0 */
  1023. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1024. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1025. cmd = sba_cmd_enc(cmd, msg_len,
  1026. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1027. cmd = sba_cmd_enc(cmd, 0x1,
  1028. SBA_RESP_SHIFT, SBA_RESP_MASK);
  1029. c_mdata = sba_cmd_write_c_mdata(0);
  1030. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1031. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1032. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  1033. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1034. cmdsp->cmd = cmd;
  1035. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1036. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1037. if (req->sba->hw_resp_size) {
  1038. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  1039. cmdsp->resp = resp_dma;
  1040. cmdsp->resp_len = req->sba->hw_resp_size;
  1041. }
  1042. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  1043. cmdsp->data = *dst_p + msg_offset;
  1044. cmdsp->data_len = msg_len;
  1045. cmdsp++;
  1046. skip_p:
  1047. if (!dst_q)
  1048. goto skip_q;
  1049. /* Type-A command to zero all buffers */
  1050. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1051. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1052. cmd = sba_cmd_enc(cmd, msg_len,
  1053. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1054. cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
  1055. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1056. cmdsp->cmd = cmd;
  1057. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1058. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1059. cmdsp++;
  1060. if (dpos == 255)
  1061. goto skip_q_computation;
  1062. pos = (dpos < req->sba->max_pq_coefs) ?
  1063. dpos : (req->sba->max_pq_coefs - 1);
  1064. /*
  1065. * Type-B command to generate initial Q from data
  1066. * and store output into buf0
  1067. */
  1068. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  1069. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1070. cmd = sba_cmd_enc(cmd, msg_len,
  1071. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1072. c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
  1073. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1074. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1075. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  1076. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  1077. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
  1078. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1079. cmdsp->cmd = cmd;
  1080. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1081. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1082. cmdsp->data = src + msg_offset;
  1083. cmdsp->data_len = msg_len;
  1084. cmdsp++;
  1085. dpos -= pos;
/* Multiple Type-A commands to generate final Q */
  1087. while (dpos) {
  1088. pos = (dpos < req->sba->max_pq_coefs) ?
  1089. dpos : (req->sba->max_pq_coefs - 1);
/*
 * Type-A command to generate Q from buf0 and
 * buf1 and store the result in buf0
 */
  1094. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1095. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1096. cmd = sba_cmd_enc(cmd, msg_len,
  1097. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1098. c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
  1099. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1100. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1101. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  1102. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  1103. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
  1104. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1105. cmdsp->cmd = cmd;
  1106. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1107. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1108. cmdsp++;
  1109. dpos -= pos;
  1110. }
  1111. skip_q_computation:
  1112. if (pq_continue) {
  1113. /*
  1114. * Type-B command to XOR previous output with
  1115. * buf0 and write it into buf0
  1116. */
  1117. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  1118. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1119. cmd = sba_cmd_enc(cmd, msg_len,
  1120. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1121. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  1122. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1123. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1124. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  1125. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1126. cmdsp->cmd = cmd;
  1127. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1128. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1129. cmdsp->data = *dst_q + msg_offset;
  1130. cmdsp->data_len = msg_len;
  1131. cmdsp++;
  1132. }
  1133. /* Type-A command to write buf0 */
  1134. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1135. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1136. cmd = sba_cmd_enc(cmd, msg_len,
  1137. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1138. cmd = sba_cmd_enc(cmd, 0x1,
  1139. SBA_RESP_SHIFT, SBA_RESP_MASK);
  1140. c_mdata = sba_cmd_write_c_mdata(0);
  1141. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1142. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1143. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  1144. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1145. cmdsp->cmd = cmd;
  1146. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1147. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1148. if (req->sba->hw_resp_size) {
  1149. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  1150. cmdsp->resp = resp_dma;
  1151. cmdsp->resp_len = req->sba->hw_resp_size;
  1152. }
  1153. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  1154. cmdsp->data = *dst_q + msg_offset;
  1155. cmdsp->data_len = msg_len;
  1156. cmdsp++;
  1157. skip_q:
  1158. /* Fillup brcm_message */
  1159. msg->type = BRCM_MESSAGE_SBA;
  1160. msg->sba.cmds = cmds;
  1161. msg->sba.cmds_count = cmdsp - cmds;
  1162. msg->ctx = req;
  1163. msg->error = 0;
  1164. }
  1165. static struct sba_request *
  1166. sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
  1167. dma_addr_t *dst_p, dma_addr_t *dst_q,
  1168. dma_addr_t src, u8 scf, size_t len,
  1169. unsigned long flags)
  1170. {
  1171. struct sba_request *req = NULL;
  1172. /* Alloc new request */
  1173. req = sba_alloc_request(sba);
  1174. if (!req)
  1175. return NULL;
  1176. if (flags & DMA_PREP_FENCE)
  1177. req->flags |= SBA_REQUEST_FENCE;
  1178. /* Fillup request messages */
  1179. sba_fillup_pq_single_msg(req, dmaf_continue(flags),
  1180. req->cmds, &req->msg, off, len,
  1181. dst_p, dst_q, src, scf);
  1182. /* Init async_tx descriptor */
  1183. req->tx.flags = flags;
  1184. req->tx.cookie = -EBUSY;
  1185. return req;
  1186. }
  1187. static struct dma_async_tx_descriptor *
  1188. sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
  1189. u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
  1190. {
  1191. u32 i, dst_q_index;
  1192. size_t req_len;
  1193. bool slow = false;
  1194. dma_addr_t off = 0;
  1195. dma_addr_t *dst_p = NULL, *dst_q = NULL;
  1196. struct sba_device *sba = to_sba_device(dchan);
  1197. struct sba_request *first = NULL, *req;
  1198. /* Sanity checks */
  1199. if (unlikely(src_cnt > sba->max_pq_srcs))
  1200. return NULL;
  1201. for (i = 0; i < src_cnt; i++)
  1202. if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
  1203. slow = true;
  1204. /* Figure-out P and Q destination addresses */
  1205. if (!(flags & DMA_PREP_PQ_DISABLE_P))
  1206. dst_p = &dst[0];
  1207. if (!(flags & DMA_PREP_PQ_DISABLE_Q))
  1208. dst_q = &dst[1];
/* Create chained requests where each request is up to hw_buf_size */
  1210. while (len) {
  1211. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  1212. if (slow) {
  1213. dst_q_index = src_cnt;
  1214. if (dst_q) {
  1215. for (i = 0; i < src_cnt; i++) {
  1216. if (*dst_q == src[i]) {
  1217. dst_q_index = i;
  1218. break;
  1219. }
  1220. }
  1221. }
  1222. if (dst_q_index < src_cnt) {
  1223. i = dst_q_index;
  1224. req = sba_prep_dma_pq_single_req(sba,
  1225. off, dst_p, dst_q, src[i], scf[i],
  1226. req_len, flags | DMA_PREP_FENCE);
  1227. if (!req)
  1228. goto fail;
  1229. if (first)
  1230. sba_chain_request(first, req);
  1231. else
  1232. first = req;
  1233. flags |= DMA_PREP_CONTINUE;
  1234. }
  1235. for (i = 0; i < src_cnt; i++) {
  1236. if (dst_q_index == i)
  1237. continue;
  1238. req = sba_prep_dma_pq_single_req(sba,
  1239. off, dst_p, dst_q, src[i], scf[i],
  1240. req_len, flags | DMA_PREP_FENCE);
  1241. if (!req)
  1242. goto fail;
  1243. if (first)
  1244. sba_chain_request(first, req);
  1245. else
  1246. first = req;
  1247. flags |= DMA_PREP_CONTINUE;
  1248. }
  1249. } else {
  1250. req = sba_prep_dma_pq_req(sba, off,
  1251. dst_p, dst_q, src, src_cnt,
  1252. scf, req_len, flags);
  1253. if (!req)
  1254. goto fail;
  1255. if (first)
  1256. sba_chain_request(first, req);
  1257. else
  1258. first = req;
  1259. }
  1260. off += req_len;
  1261. len -= req_len;
  1262. }
  1263. return (first) ? &first->tx : NULL;
  1264. fail:
  1265. if (first)
  1266. sba_free_chained_requests(first);
  1267. return NULL;
  1268. }
  1269. /* ====== Mailbox callbacks ===== */
  1270. static void sba_receive_message(struct mbox_client *cl, void *msg)
  1271. {
  1272. struct brcm_message *m = msg;
  1273. struct sba_request *req = m->ctx;
  1274. struct sba_device *sba = req->sba;
/* Report an error if the message has an error */
  1276. if (m->error < 0)
  1277. dev_err(sba->dev, "%s got message with error %d",
  1278. dma_chan_name(&sba->dma_chan), m->error);
  1279. /* Process received request */
  1280. sba_process_received_request(sba, req);
  1281. }
  1282. /* ====== Debugfs callbacks ====== */
  1283. static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
  1284. {
  1285. struct platform_device *pdev = to_platform_device(file->private);
  1286. struct sba_device *sba = platform_get_drvdata(pdev);
  1287. /* Write stats in file */
  1288. sba_write_stats_in_seqfile(sba, file);
  1289. return 0;
  1290. }
  1291. /* ====== Platform driver routines ===== */
  1292. static int sba_prealloc_channel_resources(struct sba_device *sba)
  1293. {
  1294. int i, j, ret = 0;
  1295. struct sba_request *req = NULL;
  1296. sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
  1297. sba->max_resp_pool_size,
  1298. &sba->resp_dma_base, GFP_KERNEL);
  1299. if (!sba->resp_base)
  1300. return -ENOMEM;
  1301. sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
  1302. sba->max_cmds_pool_size,
  1303. &sba->cmds_dma_base, GFP_KERNEL);
  1304. if (!sba->cmds_base) {
  1305. ret = -ENOMEM;
  1306. goto fail_free_resp_pool;
  1307. }
  1308. spin_lock_init(&sba->reqs_lock);
  1309. sba->reqs_fence = false;
  1310. INIT_LIST_HEAD(&sba->reqs_alloc_list);
  1311. INIT_LIST_HEAD(&sba->reqs_pending_list);
  1312. INIT_LIST_HEAD(&sba->reqs_active_list);
  1313. INIT_LIST_HEAD(&sba->reqs_aborted_list);
  1314. INIT_LIST_HEAD(&sba->reqs_free_list);
  1315. for (i = 0; i < sba->max_req; i++) {
  1316. req = devm_kzalloc(sba->dev,
  1317. sizeof(*req) +
  1318. sba->max_cmd_per_req * sizeof(req->cmds[0]),
  1319. GFP_KERNEL);
  1320. if (!req) {
  1321. ret = -ENOMEM;
  1322. goto fail_free_cmds_pool;
  1323. }
  1324. INIT_LIST_HEAD(&req->node);
  1325. req->sba = sba;
  1326. req->flags = SBA_REQUEST_STATE_FREE;
  1327. INIT_LIST_HEAD(&req->next);
  1328. atomic_set(&req->next_pending_count, 0);
  1329. for (j = 0; j < sba->max_cmd_per_req; j++) {
  1330. req->cmds[j].cmd = 0;
  1331. req->cmds[j].cmd_dma = sba->cmds_base +
  1332. (i * sba->max_cmd_per_req + j) * sizeof(u64);
  1333. req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
  1334. (i * sba->max_cmd_per_req + j) * sizeof(u64);
  1335. req->cmds[j].flags = 0;
  1336. }
  1337. memset(&req->msg, 0, sizeof(req->msg));
  1338. dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
  1339. async_tx_ack(&req->tx);
  1340. req->tx.tx_submit = sba_tx_submit;
  1341. req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
  1342. list_add_tail(&req->node, &sba->reqs_free_list);
  1343. }
  1344. return 0;
  1345. fail_free_cmds_pool:
  1346. dma_free_coherent(sba->mbox_dev,
  1347. sba->max_cmds_pool_size,
  1348. sba->cmds_base, sba->cmds_dma_base);
  1349. fail_free_resp_pool:
  1350. dma_free_coherent(sba->mbox_dev,
  1351. sba->max_resp_pool_size,
  1352. sba->resp_base, sba->resp_dma_base);
  1353. return ret;
  1354. }
  1355. static void sba_freeup_channel_resources(struct sba_device *sba)
  1356. {
  1357. dmaengine_terminate_all(&sba->dma_chan);
  1358. dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
  1359. sba->cmds_base, sba->cmds_dma_base);
  1360. dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
  1361. sba->resp_base, sba->resp_dma_base);
  1362. sba->resp_base = NULL;
  1363. sba->resp_dma_base = 0;
  1364. }

static int sba_async_register(struct sba_device *sba)
{
	int ret;
	struct dma_device *dma_dev = &sba->dma_dev;

	/* Initialize DMA channel cookie */
	sba->dma_chan.device = dma_dev;
	dma_cookie_init(&sba->dma_chan);

	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	/*
	 * Set mailbox channel device as the base device of
	 * our dma_device because the actual memory accesses
	 * will be done by the mailbox controller.
	 */
	dma_dev->dev = sba->mbox_dev;

	/* Set base channel control routines */
	dma_dev->device_free_chan_resources = sba_free_chan_resources;
	dma_dev->device_terminate_all = sba_device_terminate_all;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;

	/* Set interrupt routine */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;

	/* Set memcpy routine */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;

	/* Set xor routine and capability */
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
		dma_dev->max_xor = sba->max_xor_srcs;
	}

	/* Set pq routine and capability */
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
	}
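
	/*
	 * The trailing 0 passed to dma_set_maxpq() above indicates that the
	 * hardware cannot continue a partially computed P+Q result across
	 * operations (no DMA_HAS_PQ_CONTINUE support).
	 */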

	/* Initialize DMA device channel list */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(sba->dev, "async device register error %d", ret);
		return ret;
	}

	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
		 dma_chan_name(&sba->dma_chan),
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");

	return 0;
}
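
/*
 * Illustrative sketch (not part of this driver): once registered, the
 * channel is normally consumed through the async_tx helpers rather than
 * by calling the prep routines directly. Assuming the usual
 * crypto/async_tx API, an XOR offload request might look like:
 *
 *	#include <linux/async_tx.h>
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL,
 *			  my_callback, my_context, NULL);
 *	tx = async_xor(dest_page, src_pages, 0, src_cnt, PAGE_SIZE,
 *		       &submit);
 *	async_tx_issue_pending_all();
 *
 * my_callback, my_context, dest_page, src_pages and src_cnt are
 * hypothetical caller-provided names used only for illustration.
 */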

static int sba_probe(struct platform_device *pdev)
{
	int i, ret = 0, mchans_count;
	struct sba_device *sba;
	struct platform_device *mbox_pdev;
	struct of_phandle_args args;

	/* Allocate main SBA struct */
	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	sba->dev = &pdev->dev;
	platform_set_drvdata(pdev, sba);

	/* Number of channels equals number of mailbox channels */
	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
	if (ret <= 0)
		return -ENODEV;
	mchans_count = ret;
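
	/*
	 * For reference, a device tree node for this driver is expected to
	 * look roughly like the illustrative fragment below (placeholder
	 * names and cell values), with one "mboxes" entry per ring-manager
	 * channel to use:
	 *
	 *	raid: raid {
	 *		compatible = "brcm,iproc-sba-v2";
	 *		mboxes = <&raid_mbox 0 0x1 0xffff>,
	 *			 <&raid_mbox 1 0x1 0xffff>;
	 *	};
	 *
	 * The exact mbox specifier cells depend on the ring-manager
	 * binding; see the brcm,iproc-sba DT binding document.
	 */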

	/* Determine SBA version from DT compatible string */
	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
		sba->ver = SBA_VER_1;
	else if (of_device_is_compatible(sba->dev->of_node,
					 "brcm,iproc-sba-v2"))
		sba->ver = SBA_VER_2;
	else
		return -ENODEV;

	/* Derived configuration parameters */
	switch (sba->ver) {
	case SBA_VER_1:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 6;
		sba->max_pq_srcs = 6;
		break;
	case SBA_VER_2:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 30;
		/*
		 * We could support max_pq_srcs == max_pq_coefs, but we are
		 * limited by the number of SBA commands that fit in one
		 * message for the underlying ring manager HW.
		 */
		sba->max_pq_srcs = 12;
		break;
	default:
		return -EINVAL;
	}
	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
	sba->max_cmds_pool_size = sba->max_req *
				  sba->max_cmd_per_req * sizeof(u64);
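
	/*
	 * Worked example for SBA_VER_2: max_pq_srcs = 12, so
	 * max_cmd_per_req = 12 + 3 = 15 and max_xor_srcs = 14. Each request
	 * then reserves 15 * 8 = 120 bytes of the command pool and
	 * hw_resp_size = 8 bytes of the response pool.
	 */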

	/* Setup mailbox client */
	sba->client.dev = &pdev->dev;
	sba->client.rx_callback = sba_receive_message;
	sba->client.tx_block = false;
	sba->client.knows_txdone = true;
	sba->client.tx_tout = 0;
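	/*
	 * tx_block = false and knows_txdone = true: the driver does not want
	 * the mailbox framework to block or poll for TX completion; it
	 * signals completion itself and relies on the rx_callback
	 * (sba_receive_message) to learn when the hardware is done.
	 */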

	/* Allocate mailbox channel array */
	sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
				   sizeof(*sba->mchans), GFP_KERNEL);
	if (!sba->mchans)
		return -ENOMEM;

	/* Request mailbox channels */
	sba->mchans_count = 0;
	for (i = 0; i < mchans_count; i++) {
		sba->mchans[i] = mbox_request_channel(&sba->client, i);
		if (IS_ERR(sba->mchans[i])) {
			ret = PTR_ERR(sba->mchans[i]);
			goto fail_free_mchans;
		}
		sba->mchans_count++;
	}
	atomic_set(&sba->mchans_current, 0);
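	/*
	 * mchans_current is used elsewhere in this file as a round-robin
	 * cursor that spreads submitted requests across the requested
	 * mailbox channels.
	 */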

	/* Find out the underlying mailbox device */
	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		goto fail_free_mchans;
	mbox_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!mbox_pdev) {
		ret = -ENODEV;
		goto fail_free_mchans;
	}
	sba->mbox_dev = &mbox_pdev->dev;

	/* All mailbox channels should belong to the same ring manager device */
	for (i = 1; i < mchans_count; i++) {
		ret = of_parse_phandle_with_args(pdev->dev.of_node,
						 "mboxes", "#mbox-cells", i, &args);
		if (ret)
			goto fail_free_mchans;
		mbox_pdev = of_find_device_by_node(args.np);
		of_node_put(args.np);
		if (sba->mbox_dev != &mbox_pdev->dev) {
			ret = -EINVAL;
			goto fail_free_mchans;
		}
	}

	/* Prealloc channel resources */
	ret = sba_prealloc_channel_resources(sba);
	if (ret)
		goto fail_free_mchans;

	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
	if (IS_ERR_OR_NULL(sba->root)) {
		dev_err(sba->dev, "failed to create debugfs root entry\n");
		sba->root = NULL;
		goto skip_debugfs;
	}

	/* Create debugfs stats entry */
	sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
						 sba_debugfs_stats_show);
	if (IS_ERR_OR_NULL(sba->stats))
		dev_err(sba->dev, "failed to create debugfs stats file\n");

skip_debugfs:

	/* Register DMA device with Linux async framework */
	ret = sba_async_register(sba);
	if (ret)
		goto fail_free_resources;

	/* Print device info */
	dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
		 dma_chan_name(&sba->dma_chan), sba->ver + 1,
		 sba->mchans_count);

	return 0;

fail_free_resources:
	debugfs_remove_recursive(sba->root);
	sba_freeup_channel_resources(sba);
fail_free_mchans:
	for (i = 0; i < sba->mchans_count; i++)
		mbox_free_channel(sba->mchans[i]);
	return ret;
}

static int sba_remove(struct platform_device *pdev)
{
	int i;
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);

	debugfs_remove_recursive(sba->root);

	sba_freeup_channel_resources(sba);

	for (i = 0; i < sba->mchans_count; i++)
		mbox_free_channel(sba->mchans[i]);

	return 0;
}

static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, sba_of_match);

static struct platform_driver sba_driver = {
	.probe = sba_probe,
	.remove = sba_remove,
	.driver = {
		.name = "bcm-sba-raid",
		.of_match_table = sba_of_match,
	},
};
module_platform_driver(sba_driver);

MODULE_DESCRIPTION("Broadcom SBA RAID driver");
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");