/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#ifndef __ASM_OPENRISC_DMA_MAPPING_H
#define __ASM_OPENRISC_DMA_MAPPING_H

/*
 * See Documentation/DMA-API-HOWTO.txt and
 * Documentation/DMA-API.txt for documentation.
 *
 * This file is written with the intention of eventually moving over
 * to largely using asm-generic/dma-mapping-common.h in its place.
 */

#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <linux/kmemcheck.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
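
/*
 * Backend operations provided by the OpenRISC DMA support code; each
 * inline wrapper further below pairs one of these calls with the
 * matching dma-debug hook from <linux/dma-debug.h>.
 */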
void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag);

void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			    dma_addr_t dma_handle);

dma_addr_t or1k_map_page(struct device *dev, struct page *page,
			 unsigned long offset, size_t size,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);

void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		     size_t size, enum dma_data_direction dir,
		     struct dma_attrs *attrs);

int or1k_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		struct dma_attrs *attrs);

void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
		   int nents, enum dma_data_direction dir,
		   struct dma_attrs *attrs);

void or1k_sync_single_for_cpu(struct device *dev,
			      dma_addr_t dma_handle, size_t size,
			      enum dma_data_direction dir);

void or1k_sync_single_for_device(struct device *dev,
				 dma_addr_t dma_handle, size_t size,
				 enum dma_data_direction dir);
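
/*
 * Coherent (consistent) allocations: the returned CPU pointer and the
 * DMA handle written to *dma_handle refer to the same buffer, which
 * both CPU and device may access without explicit sync calls.
 *
 * A minimal driver-side sketch (dev is assumed to exist; names are
 * illustrative only):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... program 'ring_dma' into the device, use 'ring' from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */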
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	void *memory;

	memory = or1k_dma_alloc_coherent(dev, size, dma_handle, flag);

	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
	return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	or1k_dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
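
/*
 * Streaming mappings: dma_map_single()/dma_unmap_single() map an
 * existing kernel-virtual buffer for one DMA transfer.  Between map
 * and unmap the buffer belongs to the device; use the
 * dma_sync_single_* helpers below if the CPU must touch it in between.
 *
 * A minimal sketch (dev/buf/len assumed to exist):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the device transfer using 'handle' ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */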
static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction dir)
{
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = or1k_map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
				    size_t size,
				    enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
	or1k_unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
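
/*
 * Scatter-gather variant: dma_map_sg() maps up to 'nents' scatterlist
 * entries in one call and returns the number of entries actually
 * mapped, which is what the driver must iterate over.
 *
 * A minimal sketch (sgl/nents prepared elsewhere, e.g. with
 * sg_init_table(); setup_hw_descriptor() is a hypothetical driver
 * helper):
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	for_each_sg(sgl, s, count, i)
 *		setup_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */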
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = or1k_map_sg(dev, sg, nents, dir, NULL);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	or1k_unmap_sg(dev, sg, nents, dir, NULL);
}
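
/*
 * dma_map_page()/dma_unmap_page() are the page + offset form of the
 * streaming API; dma_map_single() above is built on the same
 * or1k_map_page() backend.
 */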
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = or1k_map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
	or1k_unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
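
/*
 * Ownership transfer for an existing streaming mapping: call
 * dma_sync_single_for_cpu() before the CPU reads data the device has
 * written, and dma_sync_single_for_device() after the CPU has written
 * and before the device uses the buffer again.
 *
 * A sketch for reusing one RX mapping across transfers (handle/buf/len
 * as in the dma_map_single() example above; handle_rx() is a
 * hypothetical driver helper):
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	handle_rx(buf, len);
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */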
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
	or1k_sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
	or1k_sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline int dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}
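
/*
 * The mapping wrappers above have no error reporting path, so
 * dma_mapping_error() unconditionally reports success.
 */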
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#endif	/* __ASM_OPENRISC_DMA_MAPPING_H */