  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _M68K_DMA_H
  3. #define _M68K_DMA_H 1
  4. #ifdef CONFIG_COLDFIRE
  5. /*
  6. * ColdFire DMA Model:
  7. * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
  8. * address mode emits a source address, and expects that the device will either
  9. * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
  10. * the device will place data on the correct byte(s) of the data bus, as the
  11. * memory transactions are always 32 bits. This implies that only 32 bit
  12. * devices will find single mode transfers useful. Dual address DMA mode
  13. * performs two cycles: source read and destination write. ColdFire will
  14. * align the data so that the device will always get the correct bytes, thus
  15. * is useful for 8 and 16 bit devices. This is the mode that is supported
  16. * below.
  17. *
  18. * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
  19. * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  20. *
  21. * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
  22. * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  23. *
  24. * APR/18/2002 : added proper support for MCF5272 DMA controller.
  25. * Arthur Shipkowski (art@videon-central.com)
  26. */
  27. #include <asm/coldfire.h>
  28. #include <asm/mcfsim.h>
  29. #include <asm/mcfdma.h>
  30. /*
  31. * Set number of channels of DMA on ColdFire for different implementations.
  32. */
  33. #if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
  34. defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
  35. defined(CONFIG_M528x) || defined(CONFIG_M525x)
  36. #define MAX_M68K_DMA_CHANNELS 4
  37. #elif defined(CONFIG_M5272)
  38. #define MAX_M68K_DMA_CHANNELS 1
  39. #elif defined(CONFIG_M53xx)
  40. #define MAX_M68K_DMA_CHANNELS 0
  41. #else
  42. #define MAX_M68K_DMA_CHANNELS 2
  43. #endif
  44. extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
  45. extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
  46. #if !defined(CONFIG_M5272)
  47. #define DMA_MODE_WRITE_BIT 0x01 /* Memory/IO to IO/Memory select */
  48. #define DMA_MODE_WORD_BIT 0x02 /* 8 or 16 bit transfers */
  49. #define DMA_MODE_LONG_BIT 0x04 /* or 32 bit transfers */
  50. #define DMA_MODE_SINGLE_BIT 0x08 /* single-address-mode */
  51. /* I/O to memory, 8 bits, mode */
  52. #define DMA_MODE_READ 0
  53. /* memory to I/O, 8 bits, mode */
  54. #define DMA_MODE_WRITE 1
  55. /* I/O to memory, 16 bits, mode */
  56. #define DMA_MODE_READ_WORD 2
  57. /* memory to I/O, 16 bits, mode */
  58. #define DMA_MODE_WRITE_WORD 3
  59. /* I/O to memory, 32 bits, mode */
  60. #define DMA_MODE_READ_LONG 4
  61. /* memory to I/O, 32 bits, mode */
  62. #define DMA_MODE_WRITE_LONG 5
  63. /* I/O to memory, 8 bits, single-address-mode */
  64. #define DMA_MODE_READ_SINGLE 8
  65. /* memory to I/O, 8 bits, single-address-mode */
  66. #define DMA_MODE_WRITE_SINGLE 9
  67. /* I/O to memory, 16 bits, single-address-mode */
  68. #define DMA_MODE_READ_WORD_SINGLE 10
  69. /* memory to I/O, 16 bits, single-address-mode */
  70. #define DMA_MODE_WRITE_WORD_SINGLE 11
  71. /* I/O to memory, 32 bits, single-address-mode */
  72. #define DMA_MODE_READ_LONG_SINGLE 12
  73. /* memory to I/O, 32 bits, single-address-mode */
  74. #define DMA_MODE_WRITE_LONG_SINGLE 13
  75. #else /* CONFIG_M5272 is defined */
  76. /* Source static-address mode */
  77. #define DMA_MODE_SRC_SA_BIT 0x01
  78. /* Two bits to select between all four modes */
  79. #define DMA_MODE_SSIZE_MASK 0x06
  80. /* Offset to shift bits in */
  81. #define DMA_MODE_SSIZE_OFF 0x01
  82. /* Destination static-address mode */
  83. #define DMA_MODE_DES_SA_BIT 0x10
  84. /* Two bits to select between all four modes */
  85. #define DMA_MODE_DSIZE_MASK 0x60
  86. /* Offset to shift bits in */
  87. #define DMA_MODE_DSIZE_OFF 0x05
  88. /* Size modifiers */
  89. #define DMA_MODE_SIZE_LONG 0x00
  90. #define DMA_MODE_SIZE_BYTE 0x01
  91. #define DMA_MODE_SIZE_WORD 0x02
  92. #define DMA_MODE_SIZE_LINE 0x03
  93. /*
  94. * Aliases to help speed quick ports; these may be suboptimal, however. They
  95. * do not include the SINGLE mode modifiers since the MCF5272 does not have a
  96. * mode where the device is in control of its addressing.
  97. */
  98. /* I/O to memory, 8 bits, mode */
  99. #define DMA_MODE_READ ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
  100. /* memory to I/O, 8 bits, mode */
  101. #define DMA_MODE_WRITE ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
  102. /* I/O to memory, 16 bits, mode */
  103. #define DMA_MODE_READ_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
  104. /* memory to I/O, 16 bits, mode */
  105. #define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
  106. /* I/O to memory, 32 bits, mode */
  107. #define DMA_MODE_READ_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
  108. /* memory to I/O, 32 bits, mode */
  109. #define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
  110. #endif /* !defined(CONFIG_M5272) */
  111. #if !defined(CONFIG_M5272)
  112. /* enable/disable a specific DMA channel */
  113. static __inline__ void enable_dma(unsigned int dmanr)
  114. {
  115. volatile unsigned short *dmawp;
  116. #ifdef DMA_DEBUG
  117. printk("enable_dma(dmanr=%d)\n", dmanr);
  118. #endif
  119. dmawp = (unsigned short *) dma_base_addr[dmanr];
  120. dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
  121. }
  122. static __inline__ void disable_dma(unsigned int dmanr)
  123. {
  124. volatile unsigned short *dmawp;
  125. volatile unsigned char *dmapb;
  126. #ifdef DMA_DEBUG
  127. printk("disable_dma(dmanr=%d)\n", dmanr);
  128. #endif
  129. dmawp = (unsigned short *) dma_base_addr[dmanr];
  130. dmapb = (unsigned char *) dma_base_addr[dmanr];
  131. /* Turn off external requests, and stop any DMA in progress */
  132. dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
  133. dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
  134. }
  135. /*
  136. * Clear the 'DMA Pointer Flip Flop'.
  137. * Write 0 for LSB/MSB, 1 for MSB/LSB access.
  138. * Use this once to initialize the FF to a known state.
  139. * After that, keep track of it. :-)
  140. * --- In order to do that, the DMA routines below should ---
  141. * --- only be used while interrupts are disabled! ---
  142. *
  143. * This is a NOP for ColdFire. Provide a stub for compatibility.
  144. */
  145. static __inline__ void clear_dma_ff(unsigned int dmanr)
  146. {
  147. }
  148. /* set mode (above) for a specific DMA channel */
  149. static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
  150. {
  151. volatile unsigned char *dmabp;
  152. volatile unsigned short *dmawp;
  153. #ifdef DMA_DEBUG
  154. printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
  155. #endif
  156. dmabp = (unsigned char *) dma_base_addr[dmanr];
  157. dmawp = (unsigned short *) dma_base_addr[dmanr];
  158. /* Clear config errors */
  159. dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;
  160. /* Set command register */
  161. dmawp[MCFDMA_DCR] =
  162. MCFDMA_DCR_INT | /* Enable completion irq */
  163. MCFDMA_DCR_CS | /* Force one xfer per request */
  164. MCFDMA_DCR_AA | /* Enable auto alignment */
  165. /* single-address-mode */
  166. ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
  167. /* sets s_rw (-> r/w) high if Memory to I/0 */
  168. ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
  169. /* Memory to I/O or I/O to Memory */
  170. ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
  171. /* 32 bit, 16 bit or 8 bit transfers */
  172. ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
  173. ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
  174. MCFDMA_DCR_SSIZE_BYTE)) |
  175. ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
  176. ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
  177. MCFDMA_DCR_DSIZE_BYTE));
  178. #ifdef DEBUG_DMA
  179. printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
  180. dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
  181. (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
  182. #endif
  183. }
  184. /* Set transfer address for specific DMA channel */
  185. static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
  186. {
  187. volatile unsigned short *dmawp;
  188. volatile unsigned int *dmalp;
  189. #ifdef DMA_DEBUG
  190. printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
  191. #endif
  192. dmawp = (unsigned short *) dma_base_addr[dmanr];
  193. dmalp = (unsigned int *) dma_base_addr[dmanr];
  194. /* Determine which address registers are used for memory/device accesses */
  195. if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
  196. /* Source incrementing, must be memory */
  197. dmalp[MCFDMA_SAR] = a;
  198. /* Set dest address, must be device */
  199. dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
  200. } else {
  201. /* Destination incrementing, must be memory */
  202. dmalp[MCFDMA_DAR] = a;
  203. /* Set source address, must be device */
  204. dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
  205. }
  206. #ifdef DEBUG_DMA
  207. printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
  208. __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
  209. (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
  210. (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
  211. #endif
  212. }
  213. /*
  214. * Specific for Coldfire - sets device address.
  215. * Should be called after the mode set call, and before set DMA address.
  216. */
  217. static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
  218. {
  219. #ifdef DMA_DEBUG
  220. printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
  221. #endif
  222. dma_device_address[dmanr] = a;
  223. }
  224. /*
  225. * NOTE 2: "count" represents _bytes_.
  226. */
  227. static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
  228. {
  229. volatile unsigned short *dmawp;
  230. #ifdef DMA_DEBUG
  231. printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
  232. #endif
  233. dmawp = (unsigned short *) dma_base_addr[dmanr];
  234. dmawp[MCFDMA_BCR] = (unsigned short)count;
  235. }
  236. /*
  237. * Get DMA residue count. After a DMA transfer, this
  238. * should return zero. Reading this while a DMA transfer is
  239. * still in progress will return unpredictable results.
  240. * Otherwise, it returns the number of _bytes_ left to transfer.
  241. */
  242. static __inline__ int get_dma_residue(unsigned int dmanr)
  243. {
  244. volatile unsigned short *dmawp;
  245. unsigned short count;
  246. #ifdef DMA_DEBUG
  247. printk("get_dma_residue(dmanr=%d)\n", dmanr);
  248. #endif
  249. dmawp = (unsigned short *) dma_base_addr[dmanr];
  250. count = dmawp[MCFDMA_BCR];
  251. return((int) count);
  252. }
  253. #else /* CONFIG_M5272 is defined */
  254. /*
  255. * The MCF5272 DMA controller is very different than the controller defined above
  256. * in terms of register mapping. For instance, with the exception of the 16-bit
  257. * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
  258. *
  259. * The big difference, however, is the lack of device-requested DMA. All modes
  260. * are dual address transfer, and there is no 'device' setup or direction bit.
  261. * You can DMA between a device and memory, between memory and memory, or even between
  262. * two devices directly, with any combination of incrementing and non-incrementing
  263. * addresses you choose. This puts a crimp in distinguishing between the 'device
  264. * address' set up by set_dma_device_addr.
  265. *
  266. * Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
  267. * which will act exactly as above in -- it will look to see if the source is set to
  268. * autoincrement, and if so it will make the source use the set_dma_addr value and the
  269. * destination the set_dma_device_addr value. Otherwise the source will be set to the
  270. * set_dma_device_addr value and the destination will get the set_dma_addr value.
  271. *
  272. * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
  273. * and make it explicit. Depending on what you're doing, one of these two should work
  274. * for you, but don't mix them in the same transfer setup.
  275. */
  276. /* enable/disable a specific DMA channel */
  277. static __inline__ void enable_dma(unsigned int dmanr)
  278. {
  279. volatile unsigned int *dmalp;
  280. #ifdef DMA_DEBUG
  281. printk("enable_dma(dmanr=%d)\n", dmanr);
  282. #endif
  283. dmalp = (unsigned int *) dma_base_addr[dmanr];
  284. dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
  285. }
  286. static __inline__ void disable_dma(unsigned int dmanr)
  287. {
  288. volatile unsigned int *dmalp;
  289. #ifdef DMA_DEBUG
  290. printk("disable_dma(dmanr=%d)\n", dmanr);
  291. #endif
  292. dmalp = (unsigned int *) dma_base_addr[dmanr];
  293. /* Turn off external requests, and stop any DMA in progress */
  294. dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
  295. dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
  296. }
  297. /*
  298. * Clear the 'DMA Pointer Flip Flop'.
  299. * Write 0 for LSB/MSB, 1 for MSB/LSB access.
  300. * Use this once to initialize the FF to a known state.
  301. * After that, keep track of it. :-)
  302. * --- In order to do that, the DMA routines below should ---
  303. * --- only be used while interrupts are disabled! ---
  304. *
  305. * This is a NOP for ColdFire. Provide a stub for compatibility.
  306. */
  307. static __inline__ void clear_dma_ff(unsigned int dmanr)
  308. {
  309. }
  310. /* set mode (above) for a specific DMA channel */
  311. static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
  312. {
  313. volatile unsigned int *dmalp;
  314. volatile unsigned short *dmawp;
  315. #ifdef DMA_DEBUG
  316. printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
  317. #endif
  318. dmalp = (unsigned int *) dma_base_addr[dmanr];
  319. dmawp = (unsigned short *) dma_base_addr[dmanr];
  320. /* Clear config errors */
  321. dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
  322. /* Set command register */
  323. dmalp[MCFDMA_DMR] =
  324. MCFDMA_DMR_RQM_DUAL | /* Mandatory Request Mode setting */
  325. MCFDMA_DMR_DSTT_SD | /* Set up addressing types; set to supervisor-data. */
  326. MCFDMA_DMR_SRCT_SD | /* Set up addressing types; set to supervisor-data. */
  327. /* source static-address-mode */
  328. ((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
  329. /* dest static-address-mode */
  330. ((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
  331. /* burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272 */
  332. (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
  333. (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);
  334. dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN; /* Enable completion interrupts */
  335. #ifdef DEBUG_DMA
  336. printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
  337. dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
  338. (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
  339. #endif
  340. }
  341. /* Set transfer address for specific DMA channel */
  342. static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
  343. {
  344. volatile unsigned int *dmalp;
  345. #ifdef DMA_DEBUG
  346. printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
  347. #endif
  348. dmalp = (unsigned int *) dma_base_addr[dmanr];
  349. /* Determine which address registers are used for memory/device accesses */
  350. if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
  351. /* Source incrementing, must be memory */
  352. dmalp[MCFDMA_DSAR] = a;
  353. /* Set dest address, must be device */
  354. dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
  355. } else {
  356. /* Destination incrementing, must be memory */
  357. dmalp[MCFDMA_DDAR] = a;
  358. /* Set source address, must be device */
  359. dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
  360. }
  361. #ifdef DEBUG_DMA
  362. printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
  363. __FILE__, __LINE__, dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
  364. (int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
  365. (int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
  366. #endif
  367. }
  368. /*
  369. * Specific for Coldfire - sets device address.
  370. * Should be called after the mode set call, and before set DMA address.
  371. */
  372. static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
  373. {
  374. #ifdef DMA_DEBUG
  375. printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
  376. #endif
  377. dma_device_address[dmanr] = a;
  378. }
  379. /*
  380. * NOTE 2: "count" represents _bytes_.
  381. *
  382. * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
  383. */
  384. static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
  385. {
  386. volatile unsigned int *dmalp;
  387. #ifdef DMA_DEBUG
  388. printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
  389. #endif
  390. dmalp = (unsigned int *) dma_base_addr[dmanr];
  391. dmalp[MCFDMA_DBCR] = count;
  392. }
  393. /*
  394. * Get DMA residue count. After a DMA transfer, this
  395. * should return zero. Reading this while a DMA transfer is
  396. * still in progress will return unpredictable results.
  397. * Otherwise, it returns the number of _bytes_ left to transfer.
  398. */
  399. static __inline__ int get_dma_residue(unsigned int dmanr)
  400. {
  401. volatile unsigned int *dmalp;
  402. unsigned int count;
  403. #ifdef DMA_DEBUG
  404. printk("get_dma_residue(dmanr=%d)\n", dmanr);
  405. #endif
  406. dmalp = (unsigned int *) dma_base_addr[dmanr];
  407. count = dmalp[MCFDMA_DBCR];
  408. return(count);
  409. }
  410. #endif /* !defined(CONFIG_M5272) */
  411. #endif /* CONFIG_COLDFIRE */
  412. /* it's useless on the m68k, but unfortunately needed by the new
  413. bootmem allocator (but this should do it for this) */
  414. #define MAX_DMA_ADDRESS PAGE_OFFSET
  415. #define MAX_DMA_CHANNELS 8
  416. extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
  417. extern void free_dma(unsigned int dmanr); /* release it again */
  418. #ifdef CONFIG_PCI
  419. extern int isa_dma_bridge_buggy;
  420. #else
  421. #define isa_dma_bridge_buggy (0)
  422. #endif
  423. #endif /* _M68K_DMA_H */