vfio_iommu_spapr_tce.c

/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp. All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"aik@ozlabs.ru"
#define DRIVER_DESC	"VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);
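
/*
 * Helpers to account pinned pages against current->mm->locked_vm and
 * RLIMIT_MEMLOCK: try_increment_locked_vm() fails with -ENOMEM when the
 * limit would be exceeded (unless the task has CAP_IPC_LOCK), and
 * decrement_locked_vm() undoes the accounting on the release paths.
 */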
static long try_increment_locked_vm(long npages)
{
	long ret = 0, locked, lock_limit;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if (!npages)
		return 0;

	down_write(&current->mm->mmap_sem);
	locked = current->mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		current->mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}

static void decrement_locked_vm(long npages)
{
	if (!current || !current->mm || !npages)
		return; /* process exited */

	down_write(&current->mm->mmap_sem);
	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
		npages = current->mm->locked_vm;
	current->mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&current->mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * The container descriptor supports one or more groups per container.
 * Groups are attached after creation as the VFIO API does not supply
 * the container with an IOMMU group at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	unsigned long locked_pages;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
};
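
/*
 * Memory preregistration (v2 containers only): userspace registers chunks
 * of its address space up front so that mm_iommu_get() pins and accounts
 * the pages once; later map/unmap requests only translate the cached
 * userspace addresses.
 */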
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	return mm_iommu_put(mem);
}

static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	ret = mm_iommu_get(vaddr, entries, &mem);
	if (ret)
		return ret;

	container->enabled = true;

	return 0;
}
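
/*
 * The userspace view is a per-table array caching the userspace address
 * of every mapped TCE so that it can be looked up again at unmap time.
 * It is vmalloc'ed and accounted against RLIMIT_MEMLOCK like the pinned
 * pages themselves.
 */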
static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);
	unsigned long *uas;
	long ret;

	BUG_ON(tbl->it_userspace);

	ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
	if (ret)
		return ret;

	uas = vzalloc(cb);
	if (!uas) {
		decrement_locked_vm(cb >> PAGE_SHIFT);
		return -ENOMEM;
	}
	tbl->it_userspace = uas;

	return 0;
}

static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);

	if (!tbl->it_userspace)
		return;

	vfree(tbl->it_userspace);
	tbl->it_userspace = NULL;
	decrement_locked_vm(cb >> PAGE_SHIFT);
}

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}
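
/*
 * Find the table backing the given bus address (ioba); returns the window
 * number or -1 if no DMA window covers the address.
 */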
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (!current->mm)
		return -ESRCH; /* process exited */

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap. For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult or even
	 * impossible to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled. The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the max that the guest
	 * can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	if (!current->mm)
		return;

	decrement_locked_vm(container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(tbl);
	}

	tce_iommu_disable(container);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}
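
/*
 * Translate a userspace address within a preregistered area into a host
 * physical address via mm_iommu_lookup()/mm_iommu_ua_to_hpa().
 */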
static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(tce, size);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
		unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua || !current || !current->mm)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
			&hpa, &mem);
	if (ret)
		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
				__func__, *pua, entry, ret);

	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = 0;
}
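
/*
 * Clear "pages" TCE entries starting at "entry", dropping the page
 * reference (v1) or the region's mapped counter (v2) for every entry
 * that was actually mapped.
 */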
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}
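
/* Pin one page of the userspace buffer and return its host physical address. */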
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}
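
/*
 * v1 map path: pin every userspace page, check that it fully contains
 * the IOMMU page and program the TCE; on failure, undo the entries
 * built so far.
 */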
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
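
/*
 * v2 map path: pages were already pinned at preregistration time, so
 * only translate the userspace address, bump the region's mapped
 * counter and cache the userspace address for the unmap path.
 */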
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
				entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
				&hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(tbl, entry + i);

		*pua = tce;

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
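
/*
 * Allocate a hardware TCE table via the platform ops, accounting the
 * table memory (and, for v2, the userspace view) against RLIMIT_MEMLOCK.
 */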
static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	if (!ret && container->v2) {
		ret = tce_iommu_userspace_view_alloc(*ptbl);
		if (ret)
			(*ptbl)->it_ops->free(*ptbl);
	}

	if (ret)
		decrement_locked_vm(table_size >> PAGE_SHIFT);

	return ret;
}

static void tce_iommu_free_table(struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tce_iommu_userspace_view_free(tbl);
	tbl->it_ops->free(tbl);
	decrement_locked_vm(pages);
}
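
/*
 * Back end for VFIO_IOMMU_SPAPR_TCE_CREATE: create a table in a free
 * window slot and program it into every attached group.
 */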
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(tbl);

	return ret;
}
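
/*
 * Back end for VFIO_IOMMU_SPAPR_TCE_REMOVE: unprogram the window from
 * every attached group, then clear and free the table.
 */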
static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(tbl);
	container->tables[num] = NULL;

	return 0;
}
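
/* Main ioctl dispatcher of the SPAPR TCE VFIO IOMMU driver. */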
static long tce_iommu_ioctl(void *iommu_data,
		unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;

	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_window(container, create.page_shift,
				create.window_size, create.levels,
				&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}
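
/*
 * Non-DDW path: clear all entries created by userspace and hand the
 * platform-owned tables back via iommu_release_ownership().
 */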
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_userspace_view_free(tbl);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}
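
/*
 * Non-DDW path: take exclusive control of the platform-created tables
 * (tracked by the it_map bitmap) and expose them through the container.
 */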
static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = tce_iommu_userspace_view_alloc(tbl);
		if (!rc)
			rc = iommu_take_ownership(tbl);

		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}
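
/* DDW path: unprogram every window and return the group to kernel control. */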
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}
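
/*
 * DDW path: take ownership of the group and, for the first group, make
 * sure the default 32-bit DMA window exists as userspace expects it to.
 */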
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;
	struct iommu_table *tbl = NULL;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/*
	 * If this is the first group attached, check if there is
	 * a default DMA window and create one if none as
	 * the userspace expects it to exist.
	 */
	if (!tce_groups_attached(container) && !container->tables[0]) {
		ret = tce_iommu_create_table(container,
				table_group,
				0, /* window number */
				IOMMU_PAGE_SHIFT_4K,
				table_group->tce32_size,
				1, /* default levels */
				&tbl);
		if (ret)
			goto release_exit;
		else
			container->tables[0] = tbl;
	}

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		tbl = container->tables[i];

		if (!tbl)
			continue;

		/* Set the default window to a new group */
		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}
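
/*
 * Attach a group to the container; additional groups are accepted only
 * if their table_group ops match those of the groups already attached.
 */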
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops != table_group->ops) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)
		ret = tce_iommu_take_ownership(container, table_group);
	else
		ret = tce_iommu_take_ownership_ddw(container, table_group);

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);