dma-mapping.h

#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
#include <linux/dma-debug.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

#define DMA_ERROR_CODE 0

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

#define get_dma_ops(dev) platform_dma_get_ops(dev)

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do
	 * need to ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#endif /* _ASM_IA64_DMA_MAPPING_H */
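
For context, a minimal usage sketch follows. It is not part of the header: the function example_prepare_dma(), its arguments, and the error handling are hypothetical, and it only illustrates how code that ends up including this header might translate a physical buffer address with phys_to_dma() and check it against the device's DMA mask with dma_capable() before handing the address to hardware.

/* Hypothetical sketch, not part of dma-mapping.h above. */
#include <linux/device.h>
#include <linux/dma-mapping.h>	/* pulls in asm/dma-mapping.h on ia64 */
#include <linux/errno.h>
#include <linux/types.h>

static int example_prepare_dma(struct device *dev, phys_addr_t buf_phys,
			       size_t len)
{
	/* On IA-64 the bus address is identical to the physical address. */
	dma_addr_t bus_addr = phys_to_dma(dev, buf_phys);

	/* Reject buffers the device cannot reach under its DMA mask. */
	if (!dma_capable(dev, bus_addr, len))
		return -EIO;

	/* ... program bus_addr into the device here ... */
	return 0;
}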