/* iop3xx-adma.h */
/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
  18. #ifndef _ADMA_H
  19. #define _ADMA_H
  20. #include <linux/types.h>
  21. #include <linux/io.h>
  22. #include <mach/hardware.h>
  23. #include <asm/hardware/iop_adma.h>
  24. /* Memory copy units */
  25. #define DMA_CCR(chan) (chan->mmr_base + 0x0)
  26. #define DMA_CSR(chan) (chan->mmr_base + 0x4)
  27. #define DMA_DAR(chan) (chan->mmr_base + 0xc)
  28. #define DMA_NDAR(chan) (chan->mmr_base + 0x10)
  29. #define DMA_PADR(chan) (chan->mmr_base + 0x14)
  30. #define DMA_PUADR(chan) (chan->mmr_base + 0x18)
  31. #define DMA_LADR(chan) (chan->mmr_base + 0x1c)
  32. #define DMA_BCR(chan) (chan->mmr_base + 0x20)
  33. #define DMA_DCR(chan) (chan->mmr_base + 0x24)
  34. /* Application accelerator unit */
  35. #define AAU_ACR(chan) (chan->mmr_base + 0x0)
  36. #define AAU_ASR(chan) (chan->mmr_base + 0x4)
  37. #define AAU_ADAR(chan) (chan->mmr_base + 0x8)
  38. #define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
  39. #define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
  40. #define AAU_DAR(chan) (chan->mmr_base + 0x20)
  41. #define AAU_ABCR(chan) (chan->mmr_base + 0x24)
  42. #define AAU_ADCR(chan) (chan->mmr_base + 0x28)
  43. #define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
  44. #define AAU_EDCR0_IDX 8
  45. #define AAU_EDCR1_IDX 17
  46. #define AAU_EDCR2_IDX 26
  47. #define DMA0_ID 0
  48. #define DMA1_ID 1
  49. #define AAU_ID 2
  50. struct iop3xx_aau_desc_ctrl {
  51. unsigned int int_en:1;
  52. unsigned int blk1_cmd_ctrl:3;
  53. unsigned int blk2_cmd_ctrl:3;
  54. unsigned int blk3_cmd_ctrl:3;
  55. unsigned int blk4_cmd_ctrl:3;
  56. unsigned int blk5_cmd_ctrl:3;
  57. unsigned int blk6_cmd_ctrl:3;
  58. unsigned int blk7_cmd_ctrl:3;
  59. unsigned int blk8_cmd_ctrl:3;
  60. unsigned int blk_ctrl:2;
  61. unsigned int dual_xor_en:1;
  62. unsigned int tx_complete:1;
  63. unsigned int zero_result_err:1;
  64. unsigned int zero_result_en:1;
  65. unsigned int dest_write_en:1;
  66. };
  67. struct iop3xx_aau_e_desc_ctrl {
  68. unsigned int reserved:1;
  69. unsigned int blk1_cmd_ctrl:3;
  70. unsigned int blk2_cmd_ctrl:3;
  71. unsigned int blk3_cmd_ctrl:3;
  72. unsigned int blk4_cmd_ctrl:3;
  73. unsigned int blk5_cmd_ctrl:3;
  74. unsigned int blk6_cmd_ctrl:3;
  75. unsigned int blk7_cmd_ctrl:3;
  76. unsigned int blk8_cmd_ctrl:3;
  77. unsigned int reserved2:7;
  78. };
  79. struct iop3xx_dma_desc_ctrl {
  80. unsigned int pci_transaction:4;
  81. unsigned int int_en:1;
  82. unsigned int dac_cycle_en:1;
  83. unsigned int mem_to_mem_en:1;
  84. unsigned int crc_data_tx_en:1;
  85. unsigned int crc_gen_en:1;
  86. unsigned int crc_seed_dis:1;
  87. unsigned int reserved:21;
  88. unsigned int crc_tx_complete:1;
  89. };
  90. struct iop3xx_desc_dma {
  91. u32 next_desc;
  92. union {
  93. u32 pci_src_addr;
  94. u32 pci_dest_addr;
  95. u32 src_addr;
  96. };
  97. union {
  98. u32 upper_pci_src_addr;
  99. u32 upper_pci_dest_addr;
  100. };
  101. union {
  102. u32 local_pci_src_addr;
  103. u32 local_pci_dest_addr;
  104. u32 dest_addr;
  105. };
  106. u32 byte_count;
  107. union {
  108. u32 desc_ctrl;
  109. struct iop3xx_dma_desc_ctrl desc_ctrl_field;
  110. };
  111. u32 crc_addr;
  112. };
  113. struct iop3xx_desc_aau {
  114. u32 next_desc;
  115. u32 src[4];
  116. u32 dest_addr;
  117. u32 byte_count;
  118. union {
  119. u32 desc_ctrl;
  120. struct iop3xx_aau_desc_ctrl desc_ctrl_field;
  121. };
  122. union {
  123. u32 src_addr;
  124. u32 e_desc_ctrl;
  125. struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
  126. } src_edc[31];
  127. };
  128. struct iop3xx_aau_gfmr {
  129. unsigned int gfmr1:8;
  130. unsigned int gfmr2:8;
  131. unsigned int gfmr3:8;
  132. unsigned int gfmr4:8;
  133. };
  134. struct iop3xx_desc_pq_xor {
  135. u32 next_desc;
  136. u32 src[3];
  137. union {
  138. u32 data_mult1;
  139. struct iop3xx_aau_gfmr data_mult1_field;
  140. };
  141. u32 dest_addr;
  142. u32 byte_count;
  143. union {
  144. u32 desc_ctrl;
  145. struct iop3xx_aau_desc_ctrl desc_ctrl_field;
  146. };
  147. union {
  148. u32 src_addr;
  149. u32 e_desc_ctrl;
  150. struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
  151. u32 data_multiplier;
  152. struct iop3xx_aau_gfmr data_mult_field;
  153. u32 reserved;
  154. } src_edc_gfmr[19];
  155. };
  156. struct iop3xx_desc_dual_xor {
  157. u32 next_desc;
  158. u32 src0_addr;
  159. u32 src1_addr;
  160. u32 h_src_addr;
  161. u32 d_src_addr;
  162. u32 h_dest_addr;
  163. u32 byte_count;
  164. union {
  165. u32 desc_ctrl;
  166. struct iop3xx_aau_desc_ctrl desc_ctrl_field;
  167. };
  168. u32 d_dest_addr;
  169. };
  170. union iop3xx_desc {
  171. struct iop3xx_desc_aau *aau;
  172. struct iop3xx_desc_dma *dma;
  173. struct iop3xx_desc_pq_xor *pq_xor;
  174. struct iop3xx_desc_dual_xor *dual_xor;
  175. void *ptr;
  176. };
  177. /* No support for p+q operations */
  178. static inline int
  179. iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
  180. {
  181. BUG();
  182. return 0;
  183. }
  184. static inline void
  185. iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
  186. unsigned long flags)
  187. {
  188. BUG();
  189. }
  190. static inline void
  191. iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
  192. {
  193. BUG();
  194. }
  195. static inline void
  196. iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
  197. dma_addr_t addr, unsigned char coef)
  198. {
  199. BUG();
  200. }
  201. static inline int
  202. iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
  203. {
  204. BUG();
  205. return 0;
  206. }
  207. static inline void
  208. iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
  209. unsigned long flags)
  210. {
  211. BUG();
  212. }
  213. static inline void
  214. iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
  215. {
  216. BUG();
  217. }
  218. #define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
  219. static inline void
  220. iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
  221. dma_addr_t *src)
  222. {
  223. BUG();
  224. }
  225. static inline int iop_adma_get_max_xor(void)
  226. {
  227. return 32;
  228. }
  229. static inline int iop_adma_get_max_pq(void)
  230. {
  231. BUG();
  232. return 0;
  233. }
  234. static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
  235. {
  236. int id = chan->device->id;
  237. switch (id) {
  238. case DMA0_ID:
  239. case DMA1_ID:
  240. return __raw_readl(DMA_DAR(chan));
  241. case AAU_ID:
  242. return __raw_readl(AAU_ADAR(chan));
  243. default:
  244. BUG();
  245. }
  246. return 0;
  247. }
  248. static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
  249. u32 next_desc_addr)
  250. {
  251. int id = chan->device->id;
  252. switch (id) {
  253. case DMA0_ID:
  254. case DMA1_ID:
  255. __raw_writel(next_desc_addr, DMA_NDAR(chan));
  256. break;
  257. case AAU_ID:
  258. __raw_writel(next_desc_addr, AAU_ANDAR(chan));
  259. break;
  260. }
  261. }
  262. #define IOP_ADMA_STATUS_BUSY (1 << 10)
  263. #define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
  264. #define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
  265. #define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
  266. static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
  267. {
  268. u32 status = __raw_readl(DMA_CSR(chan));
  269. return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
  270. }
  271. static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
  272. int num_slots)
  273. {
  274. /* num_slots will only ever be 1, 2, 4, or 8 */
  275. return (desc->idx & (num_slots - 1)) ? 0 : 1;
  276. }
  277. /* to do: support large (i.e. > hw max) buffer sizes */
  278. static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
  279. {
  280. *slots_per_op = 1;
  281. return 1;
  282. }
  283. /* to do: support large (i.e. > hw max) buffer sizes */
  284. static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
  285. {
  286. *slots_per_op = 1;
  287. return 1;
  288. }
  289. static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
  290. int *slots_per_op)
  291. {
  292. static const char slot_count_table[] = {
  293. 1, 1, 1, 1, /* 01 - 04 */
  294. 2, 2, 2, 2, /* 05 - 08 */
  295. 4, 4, 4, 4, /* 09 - 12 */
  296. 4, 4, 4, 4, /* 13 - 16 */
  297. 8, 8, 8, 8, /* 17 - 20 */
  298. 8, 8, 8, 8, /* 21 - 24 */
  299. 8, 8, 8, 8, /* 25 - 28 */
  300. 8, 8, 8, 8, /* 29 - 32 */
  301. };
  302. *slots_per_op = slot_count_table[src_cnt - 1];
  303. return *slots_per_op;
  304. }
  305. static inline int
  306. iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
  307. {
  308. switch (chan->device->id) {
  309. case DMA0_ID:
  310. case DMA1_ID:
  311. return iop_chan_memcpy_slot_count(0, slots_per_op);
  312. case AAU_ID:
  313. return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
  314. default:
  315. BUG();
  316. }
  317. return 0;
  318. }
  319. static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
  320. int *slots_per_op)
  321. {
  322. int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
  323. if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
  324. return slot_cnt;
  325. len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
  326. while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
  327. len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
  328. slot_cnt += *slots_per_op;
  329. }
  330. slot_cnt += *slots_per_op;
  331. return slot_cnt;
  332. }
  333. /* zero sum on iop3xx is limited to 1k at a time so it requires multiple
  334. * descriptors
  335. */
  336. static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
  337. int *slots_per_op)
  338. {
  339. int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
  340. if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
  341. return slot_cnt;
  342. len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
  343. while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
  344. len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
  345. slot_cnt += *slots_per_op;
  346. }
  347. slot_cnt += *slots_per_op;
  348. return slot_cnt;
  349. }
  350. static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
  351. struct iop_adma_chan *chan)
  352. {
  353. union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
  354. switch (chan->device->id) {
  355. case DMA0_ID:
  356. case DMA1_ID:
  357. return hw_desc.dma->byte_count;
  358. case AAU_ID:
  359. return hw_desc.aau->byte_count;
  360. default:
  361. BUG();
  362. }
  363. return 0;
  364. }
  365. /* translate the src_idx to a descriptor word index */
  366. static inline int __desc_idx(int src_idx)
  367. {
  368. static const int desc_idx_table[] = { 0, 0, 0, 0,
  369. 0, 1, 2, 3,
  370. 5, 6, 7, 8,
  371. 9, 10, 11, 12,
  372. 14, 15, 16, 17,
  373. 18, 19, 20, 21,
  374. 23, 24, 25, 26,
  375. 27, 28, 29, 30,
  376. };
  377. return desc_idx_table[src_idx];
  378. }
  379. static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
  380. struct iop_adma_chan *chan,
  381. int src_idx)
  382. {
  383. union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
  384. switch (chan->device->id) {
  385. case DMA0_ID:
  386. case DMA1_ID:
  387. return hw_desc.dma->src_addr;
  388. case AAU_ID:
  389. break;
  390. default:
  391. BUG();
  392. }
  393. if (src_idx < 4)
  394. return hw_desc.aau->src[src_idx];
  395. else
  396. return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
  397. }
  398. static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
  399. int src_idx, dma_addr_t addr)
  400. {
  401. if (src_idx < 4)
  402. hw_desc->src[src_idx] = addr;
  403. else
  404. hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
  405. }
  406. static inline void
  407. iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
  408. {
  409. struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
  410. union {
  411. u32 value;
  412. struct iop3xx_dma_desc_ctrl field;
  413. } u_desc_ctrl;
  414. u_desc_ctrl.value = 0;
  415. u_desc_ctrl.field.mem_to_mem_en = 1;
  416. u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
  417. u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
  418. hw_desc->desc_ctrl = u_desc_ctrl.value;
  419. hw_desc->upper_pci_src_addr = 0;
  420. hw_desc->crc_addr = 0;
  421. }
  422. static inline void
  423. iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
  424. {
  425. struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
  426. union {
  427. u32 value;
  428. struct iop3xx_aau_desc_ctrl field;
  429. } u_desc_ctrl;
  430. u_desc_ctrl.value = 0;
  431. u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
  432. u_desc_ctrl.field.dest_write_en = 1;
  433. u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
  434. hw_desc->desc_ctrl = u_desc_ctrl.value;
  435. }
  436. static inline u32
  437. iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
  438. unsigned long flags)
  439. {
  440. int i, shift;
  441. u32 edcr;
  442. union {
  443. u32 value;
  444. struct iop3xx_aau_desc_ctrl field;
  445. } u_desc_ctrl;
  446. u_desc_ctrl.value = 0;
  447. switch (src_cnt) {
  448. case 25 ... 32:
  449. u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
  450. edcr = 0;
  451. shift = 1;
  452. for (i = 24; i < src_cnt; i++) {
  453. edcr |= (1 << shift);
  454. shift += 3;
  455. }
  456. hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
  457. src_cnt = 24;
  458. /* fall through */
  459. case 17 ... 24:
  460. if (!u_desc_ctrl.field.blk_ctrl) {
  461. hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
  462. u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
  463. }
  464. edcr = 0;
  465. shift = 1;
  466. for (i = 16; i < src_cnt; i++) {
  467. edcr |= (1 << shift);
  468. shift += 3;
  469. }
  470. hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
  471. src_cnt = 16;
  472. /* fall through */
  473. case 9 ... 16:
  474. if (!u_desc_ctrl.field.blk_ctrl)
  475. u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
  476. edcr = 0;
  477. shift = 1;
  478. for (i = 8; i < src_cnt; i++) {
  479. edcr |= (1 << shift);
  480. shift += 3;
  481. }
  482. hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
  483. src_cnt = 8;
  484. /* fall through */
  485. case 2 ... 8:
  486. shift = 1;
  487. for (i = 0; i < src_cnt; i++) {
  488. u_desc_ctrl.value |= (1 << shift);
  489. shift += 3;
  490. }
  491. if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
  492. u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
  493. }
  494. u_desc_ctrl.field.dest_write_en = 1;
  495. u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
  496. u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
  497. hw_desc->desc_ctrl = u_desc_ctrl.value;
  498. return u_desc_ctrl.value;
  499. }
  500. static inline void
  501. iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
  502. unsigned long flags)
  503. {
  504. iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
  505. }
  506. /* return the number of operations */
  507. static inline int
  508. iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
  509. unsigned long flags)
  510. {
  511. int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
  512. struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
  513. union {
  514. u32 value;
  515. struct iop3xx_aau_desc_ctrl field;
  516. } u_desc_ctrl;
  517. int i, j;
  518. hw_desc = desc->hw_desc;
  519. for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
  520. i += slots_per_op, j++) {
  521. iter = iop_hw_desc_slot_idx(hw_desc, i);
  522. u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
  523. u_desc_ctrl.field.dest_write_en = 0;
  524. u_desc_ctrl.field.zero_result_en = 1;
  525. u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
  526. iter->desc_ctrl = u_desc_ctrl.value;
  527. /* for the subsequent descriptors preserve the store queue
  528. * and chain them together
  529. */
  530. if (i) {
  531. prev_hw_desc =
  532. iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
  533. prev_hw_desc->next_desc =
  534. (u32) (desc->async_tx.phys + (i << 5));
  535. }
  536. }
  537. return j;
  538. }
  539. static inline void
  540. iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
  541. unsigned long flags)
  542. {
  543. struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
  544. union {
  545. u32 value;
  546. struct iop3xx_aau_desc_ctrl field;
  547. } u_desc_ctrl;
  548. u_desc_ctrl.value = 0;
  549. switch (src_cnt) {
  550. case 25 ... 32:
  551. u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
  552. hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
  553. /* fall through */
  554. case 17 ... 24:
  555. if (!u_desc_ctrl.field.blk_ctrl) {
  556. hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
  557. u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
  558. }
  559. hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
  560. /* fall through */
  561. case 9 ... 16:
  562. if (!u_desc_ctrl.field.blk_ctrl)
  563. u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
  564. hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
  565. /* fall through */
  566. case 1 ... 8:
  567. if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
  568. u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
  569. }
  570. u_desc_ctrl.field.dest_write_en = 0;
  571. u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
  572. hw_desc->desc_ctrl = u_desc_ctrl.value;
  573. }
  574. static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
  575. struct iop_adma_chan *chan,
  576. u32 byte_count)
  577. {
  578. union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
  579. switch (chan->device->id) {
  580. case DMA0_ID:
  581. case DMA1_ID:
  582. hw_desc.dma->byte_count = byte_count;
  583. break;
  584. case AAU_ID:
  585. hw_desc.aau->byte_count = byte_count;
  586. break;
  587. default:
  588. BUG();
  589. }
  590. }
  591. static inline void
  592. iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
  593. struct iop_adma_chan *chan)
  594. {
  595. union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
  596. switch (chan->device->id) {
  597. case DMA0_ID:
  598. case DMA1_ID:
  599. iop_desc_init_memcpy(desc, 1);
  600. hw_desc.dma->byte_count = 0;
  601. hw_desc.dma->dest_addr = 0;
  602. hw_desc.dma->src_addr = 0;
  603. break;
  604. case AAU_ID:
  605. iop_desc_init_null_xor(desc, 2, 1);
  606. hw_desc.aau->byte_count = 0;
  607. hw_desc.aau->dest_addr = 0;
  608. hw_desc.aau->src[0] = 0;
  609. hw_desc.aau->src[1] = 0;
  610. break;
  611. default:
  612. BUG();
  613. }
  614. }
  615. static inline void
  616. iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
  617. {
  618. int slots_per_op = desc->slots_per_op;
  619. struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
  620. int i = 0;
  621. if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
  622. hw_desc->byte_count = len;
  623. } else {
  624. do {
  625. iter = iop_hw_desc_slot_idx(hw_desc, i);
  626. iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
  627. len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
  628. i += slots_per_op;
  629. } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
  630. iter = iop_hw_desc_slot_idx(hw_desc, i);
  631. iter->byte_count = len;
  632. }
  633. }
  634. static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
  635. struct iop_adma_chan *chan,
  636. dma_addr_t addr)
  637. {
  638. union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
  639. switch (chan->device->id) {
  640. case DMA0_ID:
  641. case DMA1_ID:
  642. hw_desc.dma->dest_addr = addr;
  643. break;
  644. case AAU_ID:
  645. hw_desc.aau->dest_addr = addr;
  646. break;
  647. default:
  648. BUG();
  649. }
  650. }
  651. static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
  652. dma_addr_t addr)
  653. {
  654. struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
  655. hw_desc->src_addr = addr;
  656. }
  657. static inline void
  658. iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
  659. dma_addr_t addr)
  660. {
  661. struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
  662. int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
  663. int i;
  664. for (i = 0; (slot_cnt -= slots_per_op) >= 0;
  665. i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
  666. iter = iop_hw_desc_slot_idx(hw_desc, i);
  667. iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
  668. }
  669. }
  670. static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
  671. int src_idx, dma_addr_t addr)
  672. {
  673. struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
  674. int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
  675. int i;
  676. for (i = 0; (slot_cnt -= slots_per_op) >= 0;
  677. i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
  678. iter = iop_hw_desc_slot_idx(hw_desc, i);
  679. iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
  680. }
  681. }
  682. static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
  683. u32 next_desc_addr)
  684. {
  685. /* hw_desc->next_desc is the same location for all channels */
  686. union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
  687. iop_paranoia(hw_desc.dma->next_desc);
  688. hw_desc.dma->next_desc = next_desc_addr;
  689. }
  690. static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
  691. {
  692. /* hw_desc->next_desc is the same location for all channels */
  693. union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
  694. return hw_desc.dma->next_desc;
  695. }
  696. static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
  697. {
  698. /* hw_desc->next_desc is the same location for all channels */
  699. union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
  700. hw_desc.dma->next_desc = 0;
  701. }
  702. static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
  703. u32 val)
  704. {
  705. struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
  706. hw_desc->src[0] = val;
  707. }
  708. static inline enum sum_check_flags
  709. iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
  710. {
  711. struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
  712. struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
  713. iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
  714. return desc_ctrl.zero_result_err << SUM_CHECK_P;
  715. }
  716. static inline void iop_chan_append(struct iop_adma_chan *chan)
  717. {
  718. u32 dma_chan_ctrl;
  719. dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
  720. dma_chan_ctrl |= 0x2;
  721. __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
  722. }
  723. static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
  724. {
  725. return __raw_readl(DMA_CSR(chan));
  726. }
  727. static inline void iop_chan_disable(struct iop_adma_chan *chan)
  728. {
  729. u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
  730. dma_chan_ctrl &= ~1;
  731. __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
  732. }
  733. static inline void iop_chan_enable(struct iop_adma_chan *chan)
  734. {
  735. u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
  736. dma_chan_ctrl |= 1;
  737. __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
  738. }
  739. static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
  740. {
  741. u32 status = __raw_readl(DMA_CSR(chan));
  742. status &= (1 << 9);
  743. __raw_writel(status, DMA_CSR(chan));
  744. }
  745. static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
  746. {
  747. u32 status = __raw_readl(DMA_CSR(chan));
  748. status &= (1 << 8);
  749. __raw_writel(status, DMA_CSR(chan));
  750. }
  751. static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
  752. {
  753. u32 status = __raw_readl(DMA_CSR(chan));
  754. switch (chan->device->id) {
  755. case DMA0_ID:
  756. case DMA1_ID:
  757. status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
  758. break;
  759. case AAU_ID:
  760. status &= (1 << 5);
  761. break;
  762. default:
  763. BUG();
  764. }
  765. __raw_writel(status, DMA_CSR(chan));
  766. }
  767. static inline int
  768. iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
  769. {
  770. return 0;
  771. }
  772. static inline int
  773. iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
  774. {
  775. return 0;
  776. }
  777. static inline int
  778. iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
  779. {
  780. return 0;
  781. }
  782. static inline int
  783. iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
  784. {
  785. return test_bit(5, &status);
  786. }
  787. static inline int
  788. iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
  789. {
  790. switch (chan->device->id) {
  791. case DMA0_ID:
  792. case DMA1_ID:
  793. return test_bit(2, &status);
  794. default:
  795. return 0;
  796. }
  797. }
  798. static inline int
  799. iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
  800. {
  801. switch (chan->device->id) {
  802. case DMA0_ID:
  803. case DMA1_ID:
  804. return test_bit(3, &status);
  805. default:
  806. return 0;
  807. }
  808. }
  809. static inline int
  810. iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
  811. {
  812. switch (chan->device->id) {
  813. case DMA0_ID:
  814. case DMA1_ID:
  815. return test_bit(1, &status);
  816. default:
  817. return 0;
  818. }
  819. }
#endif /* _ADMA_H */