iommu.c

/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}
__setup("iommu=", setup_iommu);
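
/*
 * Booting with "iommu=novmerge" on the kernel command line disables virtual
 * merging of scatterlist entries in ppc_iommu_map_sg(); "iommu=vmerge"
 * restores the default behaviour (novmerge is initialised to 0, so merging is
 * enabled unless explicitly turned off).
 */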

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
 * with 4 pools all primary threads would map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);
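
/*
 * Without the hash, pool selection in iommu_range_alloc() would effectively
 * be "cpu & (nr_pools - 1)": with four pools and 4-way SMT, the primary
 * threads (CPUs 0, 4, 8, ...) would all land in pool 0, which is exactly the
 * situation described above. hash_32() decorrelates the pool index from the
 * CPU numbering instead.
 */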

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
		   fail_iommu_store);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after PCI and VIO subsystem have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif
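
/*
 * iommu_range_alloc() hands out a run of @npages contiguous entries from one
 * of the table's pools. It returns the first entry number (relative to the
 * start of the bitmap; callers add tbl->it_offset themselves) or
 * DMA_ERROR_CODE on failure. @handle caches the end of the previous
 * allocation for scatterlist mappings, @mask bounds the highest usable DMA
 * address and @align_order requests power-of-two alignment of the result.
 */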

static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	/* (1 << order) - 1 avoids an undefined 64-bit shift when align_order is 0 */
	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (should_fail_iommu(dev))
		return DMA_ERROR_CODE;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << tbl->it_page_shift);
	else
		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     boundary_size >> tbl->it_page_shift, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
			     ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
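
/*
 * iommu_alloc() allocates table entries for @npages, programs the hardware
 * TCEs to point at @page and returns the resulting bus address, or
 * DMA_ERROR_CODE if either the range allocation or tbl->it_ops->set() fails.
 */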

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;		/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
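
/*
 * iommu_free_check() validates a (dma_addr, npages) pair against the table
 * geometry before it is freed, dumping the table parameters and returning
 * false if the range does not lie inside the table.
 */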

static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}
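
/* get_pool() maps an entry number back to the pool it was allocated from. */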

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
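
/*
 * iommu_free() is __iommu_free() plus an IOMMU TLB flush for the platforms
 * whose it_ops provide one.
 */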

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
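
/*
 * ppc_iommu_map_sg() maps a whole scatterlist: each segment gets its own
 * range allocation, and segments whose resulting bus addresses turn out to be
 * contiguous are virtually merged into a single dma_length (unless
 * "iommu=novmerge" was given or the merge would exceed the device's maximum
 * segment size). It returns the number of mapped segments, or 0 after backing
 * out any partial mappings on failure.
 */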

int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG(" - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG(" can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG(" merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG(" - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG(" - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}

void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
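
/*
 * iommu_table_clear() prepares the TCE entries of a freshly initialised
 * table: normally they are simply cleared, but a kdump kernel instead
 * preserves (and marks as allocated) the mappings left behind by the crashed
 * kernel, only freeing a minimal number of entries if the table is full.
 */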

static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump, the system goes through a clean
	 * reboot process at the time of system crash. Hence it's safe to
	 * clear the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure. This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This prevents buggy drivers that consider page 0 to be invalid
	 * from crashing the machine or losing data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz;
	unsigned int order;

	if (!tbl)
		return;

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	/*
	 * In case we have reserved the first bit, we should not emit
	 * the warning below.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);

	/* calculate bitmap size in bytes */
	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));

	if (tbl) {
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      unsigned long attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}
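
/*
 * Typical use of the two helpers above, as a rough sketch only (the real
 * callers are the powerpc dma_iommu_ops, which live outside this file):
 *
 *	struct iommu_table *tbl = get_iommu_table_base(dev);
 *	dma_addr_t dma = iommu_map_page(dev, tbl, page, offset, size,
 *					dma_get_mask(dev), dir, attrs);
 *	...
 *	iommu_unmap_page(tbl, dma, size, dir, attrs);
 */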

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> tbl->it_page_shift;
	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, 0);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}

unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
			  int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
			PTR_ERR(grp));
		return;
	}
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			 pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
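
/*
 * The two *_param_check() helpers below validate a caller-supplied I/O bus
 * address (and, for puts, the TCE value) against the table geometry; external
 * users of the table, such as the VFIO SPAPR TCE driver, are expected to call
 * them before touching any entries.
 */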

int iommu_tce_clear_param_check(struct iommu_table *tbl,
				unsigned long ioba, unsigned long tce_value,
				unsigned long npages)
{
	/* tbl->it_ops->clear() does not support any value but 0 */
	if (tce_value)
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	ioba >>= tbl->it_page_shift;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);

int iommu_tce_put_param_check(struct iommu_table *tbl,
			      unsigned long ioba, unsigned long tce)
{
	if (tce & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	ioba >>= tbl->it_page_shift;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);

long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
		    unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
		     (*direction == DMA_BIDIRECTIONAL)))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	/* if (unlikely(ret))
		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
			__func__, hwaddr, entry << tbl->it_page_shift,
			hwaddr, ret); */

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);
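
/*
 * iommu_take_ownership() takes the table away from the kernel's DMA API:
 * every bit in it_map is set so iommu_range_alloc() can no longer hand out
 * entries while an external user such as VFIO controls the table.
 * iommu_release_ownership() reverses this.
 */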

int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation and the guest
	 * can write new TCEs on top of existing ones so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires exchange() callback defined so if it is not
	 * implemented, we disallow taking ownership over the table.
	 */
	if (!tbl->it_ops->exchange)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty");
		ret = -EBUSY;
		/* Restore bit#0 set by iommu_init_table() */
		if (tbl->it_offset == 0)
			set_bit(0, tbl->it_map);
	} else {
		memset(tbl->it_map, 0xff, sz);
	}

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	memset(tbl->it_map, 0, sz);

	/* Restore bit#0 set by iommu_init_table() */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

int iommu_add_device(struct device *dev)
{
	struct iommu_table *tbl;
	struct iommu_table_group_link *tgl;

	/*
	 * The sysfs entries should be populated before
	 * binding IOMMU group. If sysfs entries aren't
	 * ready, we simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (dev->iommu_group) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	tbl = get_iommu_table_base(dev);
	if (!tbl) {
		pr_debug("%s: Skipping device %s with no tbl\n",
			 __func__, dev_name(dev));
		return 0;
	}

	tgl = list_first_entry_or_null(&tbl->it_group_list,
				       struct iommu_table_group_link, next);
	if (!tgl) {
		pr_debug("%s: Skipping device %s with no group\n",
			 __func__, dev_name(dev));
		return 0;
	}
	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev),
		 iommu_group_id(tgl->table_group->group));

	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
		pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
		       __func__, IOMMU_PAGE_SIZE(tbl),
		       PAGE_SIZE, dev_name(dev));
		return -EINVAL;
	}

	return iommu_group_add_device(tgl->table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
	/*
	 * Some devices might not have IOMMU table and group
	 * and we needn't detach them from the associated
	 * IOMMU groups
	 */
	if (!dev->iommu_group) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
			 dev_name(dev));
		return;
	}

	iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);
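
/*
 * Bus notifier so that PCI devices are added to, and removed from, their
 * IOMMU group automatically as they are discovered and removed.
 */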

static int tce_iommu_bus_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		return iommu_add_device(dev);
	case BUS_NOTIFY_DEL_DEVICE:
		if (dev->iommu_group)
			iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block tce_iommu_bus_nb = {
	.notifier_call = tce_iommu_bus_notifier,
};

int __init tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
	return 0;
}
#endif /* CONFIG_IOMMU_API */