swiotlb-xen.c

/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#define XEN_SWIOTLB_ERROR_CODE	(~(dma_addr_t)0x0)

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;

/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static u64 start_dma_addr;
/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}
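/*
 * Returns 1 if the machine (bus) frames backing the buffer that starts at
 * @offset within the Xen page @xen_pfn and spans @length bytes are
 * contiguous, 0 otherwise.
 */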
static int check_pages_physically_contiguous(unsigned long xen_pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_bfn;
	int i;
	int nr_pages;

	next_bfn = pfn_to_bfn(xen_pfn);
	nr_pages = (offset + length + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 0;
	}
	return 1;
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long xen_pfn = XEN_PFN_DOWN(p);
	unsigned int offset = p & ~XEN_PAGE_MASK;

	if (offset + size <= XEN_PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(xen_pfn, offset, size))
		return 0;
	return 1;
}
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;
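/*
 * Exchange the pages backing the SWIOTLB buffer, one IO_TLB_SEGSIZE segment
 * at a time, for memory that is machine-contiguous and addressable within
 * dma_bits, widening the address-width requirement up to max_dma_bits when
 * the hypervisor cannot satisfy a narrower one.
 */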
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
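/*
 * Pick the number of IO TLB slabs: if no count was requested, default to a
 * 64MB buffer (aligned to IO_TLB_SEGSIZE slabs), otherwise use the requested
 * count. Returns the resulting buffer size in bytes.
 */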
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}
enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}
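/*
 * Allocate the SWIOTLB buffer (from bootmem early in boot, otherwise from
 * the page allocator), exchange it for memory that is machine-contiguous
 * and below 4GB, and hand it to the core swiotlb code. On failure the
 * buffer size is halved and the whole sequence retried a few times before
 * giving up.
 */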
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
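/*
 * Allocate and zero DMA-coherent memory. If the machine address range
 * backing the allocation already fits the device's coherent DMA mask and
 * does not straddle a Xen page boundary, it is used as-is; otherwise it is
 * exchanged with the hypervisor for a machine-contiguous region below the
 * mask.
 */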
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of the
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
	}
	memset(ret, 0, size);
	return ret;
}
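/*
 * Free memory obtained from xen_swiotlb_alloc_coherent(). The contiguous
 * machine region set up at allocation time, if any, is torn down before the
 * pages are freed.
 */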
static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return you the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    (swiotlb_force != SWIOTLB_FORCE)) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
	if (map == SWIOTLB_MAP_ERROR)
		return XEN_SWIOTLB_ERROR_CODE;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	return XEN_SWIOTLB_ERROR_CODE;
}
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}
/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
static int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force == SWIOTLB_FORCE ||
		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 sg_phys(sg),
								 sg->length,
								 dir, attrs);
			if (map == SWIOTLB_MAP_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			dev_addr = xen_phys_to_bus(map);
			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
					 dev_addr,
					 map & ~PAGE_MASK,
					 sg->length,
					 dir,
					 attrs);
			sg->dma_address = dev_addr;
		} else {
			/* we are not interested in the dma_addr returned by
			 * xen_dma_map_page, only in the potential cache flushes executed
			 * by the function. */
			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
					 dev_addr,
					 paddr & ~PAGE_MASK,
					 sg->length,
					 dir,
					 attrs);
			sg->dma_address = dev_addr;
		}
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only;
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->mmap)
		return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
						  dma_addr, size, attrs);
#endif
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
/*
 * This function should be called with the pages from the current domain only;
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
	/*
	 * This check verifies that the page belongs to the current domain and
	 * is not one mapped from another domain.
	 * This check is for debug only, and should not go to production build
	 */
		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
		BUG_ON(!page_is_ram(bfn));
#endif
		return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
							 handle, size, attrs);
	}
#endif
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
}
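/*
 * Report whether a bus address returned by the map_page/map_sg callbacks
 * above denotes a mapping failure.
 */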
static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == XEN_SWIOTLB_ERROR_CODE;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = xen_swiotlb_dma_mmap,
	.get_sgtable = xen_swiotlb_get_sgtable,
	.mapping_error = xen_swiotlb_mapping_error,
};