dma-mapping.h

#ifndef _PARISC_DMA_MAPPING_H
#define _PARISC_DMA_MAPPING_H

#include <asm/cacheflush.h>

/*
** We need to support 4 different coherent dma models with one binary:
**
**     I/O MMU        consistent method        dma_sync behavior
**  =============   ======================   =========================
**  a) PA-7x00LC    uncachable host memory   flush/purge
**  b) U2/Uturn     cachable host memory     NOP
**  c) Ike/Astro    cachable host memory     NOP
**  d) EPIC/SAGA    memory on EPIC/SAGA      flush/reset DMA channel
**
** PA-7[13]00LC processors have a GSC bus interface and no I/O MMU.
**
** Systems (eg PCX-T workstations) that don't fall into the above
** categories will need to modify the needed drivers to perform
** flush/purge and allocate "regular" cacheable pages for everything.
*/
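/*
 * Illustrative sketch, not part of the original header: how a driver
 * obtains DMA-consistent memory regardless of which of the four models
 * above applies.  dma_alloc_coherent() ends up in the ops selected for
 * the platform (uncachable host memory on PA-7x00LC, cachable memory
 * behind the I/O MMU otherwise).  "my_" names are hypothetical.
 */
#if 0	/* example only; a real driver would include <linux/dma-mapping.h> */
static int my_alloc_ring(struct device *my_dev, size_t my_size,
			 void **my_ring, dma_addr_t *my_ring_dma)
{
	*my_ring = dma_alloc_coherent(my_dev, my_size, my_ring_dma, GFP_KERNEL);
	return *my_ring ? 0 : -ENOMEM;
}
#endif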
#define DMA_ERROR_CODE	(~(dma_addr_t)0)

#ifdef CONFIG_PA11
extern struct dma_map_ops pcxl_dma_ops;
extern struct dma_map_ops pcx_dma_ops;
#endif

extern struct dma_map_ops *hppa_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return hppa_dma_ops;
}
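/*
 * Illustrative sketch, not part of the original header: the generic
 * streaming-DMA calls dispatch through get_dma_ops(), i.e. through
 * whatever hppa_dma_ops was set to at boot.  "my_" names are
 * hypothetical; error handling is reduced to the sentinel defined above.
 */
#if 0	/* example only */
static dma_addr_t my_map_buf(struct device *my_dev, void *my_buf, size_t my_len)
{
	dma_addr_t handle = dma_map_single(my_dev, my_buf, my_len, DMA_TO_DEVICE);

	if (dma_mapping_error(my_dev, handle))
		return DMA_ERROR_CODE;	/* all-ones sentinel defined above */
	return handle;
}
#endif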
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	if (hppa_dma_ops->sync_single_for_cpu)
		flush_kernel_dcache_range((unsigned long)vaddr, size);
}
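/*
 * Illustrative sketch, not part of the original header: dma_cache_sync()
 * pairs with buffers from dma_alloc_noncoherent().  Per the table above,
 * the flush only does real work on PA-7x00LC (whose ops provide sync
 * callbacks); on U2/Uturn and Ike/Astro it is a NOP.  "my_" names are
 * hypothetical.
 */
#if 0	/* example only */
static void my_start_tx(struct device *my_dev, void *my_buf, size_t my_len)
{
	/* CPU filled my_buf; make it visible to the device before DMA. */
	dma_cache_sync(my_dev, my_buf, my_len, DMA_TO_DEVICE);
	/* ... tell the device to start reading from the buffer ... */
}
#endif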
static inline void *
parisc_walk_tree(struct device *dev)
{
	struct device *otherdev;

	if (likely(dev->platform_data != NULL))
		return dev->platform_data;

	/* OK, just traverse the bus to find it */
	for (otherdev = dev->parent; otherdev;
	     otherdev = otherdev->parent) {
		if (otherdev->platform_data) {
			dev->platform_data = otherdev->platform_data;
			break;
		}
	}

	return dev->platform_data;
}
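/*
 * Illustrative sketch, not part of the original header: a host-bus-adapter
 * driver might publish its private data through its device's platform_data,
 * which parisc_walk_tree() then lets every descendant device inherit and
 * cache on first lookup.  Both functions and their names are hypothetical.
 */
#if 0	/* example only */
static void my_hba_publish(struct device *my_hba_dev, void *my_hba_data)
{
	my_hba_dev->platform_data = my_hba_data;
}

static void *my_child_lookup(struct device *my_child)
{
	return parisc_walk_tree(my_child);	/* walks up to my_hba_dev */
}
#endif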
#define GET_IOC(dev) ({					\
	void *__pdata = parisc_walk_tree(dev);		\
	__pdata ? HBA_DATA(__pdata)->iommu : NULL;	\
})
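/*
 * Illustrative sketch, not part of the original header: an IOMMU-backed
 * dma_map_ops implementation would typically begin by resolving the I/O MMU
 * that controls the mapping device via GET_IOC().  The mapping body is
 * elided; the function and its name are hypothetical.
 */
#if 0	/* example only */
static dma_addr_t my_iommu_map_sketch(struct device *dev, void *addr, size_t size)
{
	struct ioc *ioc = GET_IOC(dev);

	if (!ioc)
		return DMA_ERROR_CODE;	/* no I/O MMU found above "dev" */
	/* ... allocate pdir entries, insert the mapping, return bus address ... */
	return DMA_ERROR_CODE;
}
#endif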
#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
struct ioc;
void *ccio_get_iommu(const struct parisc_device *dev);
int ccio_request_resource(const struct parisc_device *dev,
		struct resource *res);
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align);
#else /* !CONFIG_IOMMU_CCIO */
#define ccio_get_iommu(dev)		NULL
#define ccio_request_resource(dev, res)	insert_resource(&iomem_resource, res)
#define ccio_allocate_resource(dev, res, size, min, max, align) \
		allocate_resource(&iomem_resource, res, size, min, max, \
				align, NULL, NULL)
#endif /* !CONFIG_IOMMU_CCIO */
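/*
 * Illustrative sketch, not part of the original header: callers can use
 * ccio_request_resource() unconditionally; with CONFIG_IOMMU_CCIO disabled
 * the macro above degenerates to a plain insert_resource() into
 * iomem_resource.  "my_" names are hypothetical.
 */
#if 0	/* example only */
static int my_claim_region(const struct parisc_device *my_pa_dev,
			   struct resource *my_res)
{
	return ccio_request_resource(my_pa_dev, my_res);
}
#endif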
#ifdef CONFIG_IOMMU_SBA
struct parisc_device;
void *sba_get_iommu(struct parisc_device *dev);
#endif

#endif /* _PARISC_DMA_MAPPING_H */