/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
        struct list_head list;
        const struct tegra_smmu_group_soc *soc;
        struct iommu_group *group;
};

struct tegra_smmu {
        void __iomem *regs;
        struct device *dev;

        struct tegra_mc *mc;
        const struct tegra_smmu_soc *soc;

        struct list_head groups;

        unsigned long pfn_mask;
        unsigned long tlb_mask;

        unsigned long *asids;
        struct mutex lock;

        struct list_head list;

        struct dentry *debugfs;

        struct iommu_device iommu;      /* IOMMU Core code handle */
};

struct tegra_smmu_as {
        struct iommu_domain domain;
        struct tegra_smmu *smmu;
        unsigned int use_count;
        u32 *count;
        struct page **pts;
        struct page *pd;
        dma_addr_t pd_dma;
        unsigned id;
        u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
        return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
                               unsigned long offset)
{
        writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
        return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
        ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
                                         SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
                                       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
                       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
                       SMMU_PTE_NONSECURE)
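
/*
 * Address translation uses a two-level page table: a one-page page directory
 * of SMMU_NUM_PDE 32-bit entries, each of which points to a one-page page
 * table of SMMU_NUM_PTE 32-bit entries. 1024 PDEs times 1024 PTEs times
 * 4 KiB pages covers the full 32-bit IOVA aperture set up in
 * tegra_smmu_domain_alloc().
 */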
static unsigned int iova_pd_index(unsigned long iova)
{
        return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
        return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
        addr >>= 12;

        return (addr & smmu->pfn_mask) == addr;
}
static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
        return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                  unsigned long offset)
{
        u32 value;

        offset &= ~(smmu->mc->soc->atom_size - 1);

        if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
                value = 0;
#endif
                smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
        }

        value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
                                       unsigned long asid)
{
        u32 value;

        if (smmu->soc->num_asids == 4)
                value = (asid & 0x3) << 29;
        else
                value = (asid & 0x7f) << 24;

        value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
                                          unsigned long asid,
                                          unsigned long iova)
{
        u32 value;

        if (smmu->soc->num_asids == 4)
                value = (asid & 0x3) << 29;
        else
                value = (asid & 0x7f) << 24;

        value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
                                        unsigned long asid,
                                        unsigned long iova)
{
        u32 value;

        if (smmu->soc->num_asids == 4)
                value = (asid & 0x3) << 29;
        else
                value = (asid & 0x7f) << 24;

        value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
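
/*
 * Reading back SMMU_CONFIG serves as a barrier: it makes sure the preceding
 * register writes (PTC/TLB flushes, page table updates) have reached the
 * hardware before execution continues.
 */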
static inline void smmu_flush(struct tegra_smmu *smmu)
{
        smmu_readl(smmu, SMMU_CONFIG);
}
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
        unsigned long id;

        mutex_lock(&smmu->lock);

        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
        if (id >= smmu->soc->num_asids) {
                mutex_unlock(&smmu->lock);
                return -ENOSPC;
        }

        set_bit(id, smmu->asids);
        *idp = id;

        mutex_unlock(&smmu->lock);
        return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
        mutex_lock(&smmu->lock);
        clear_bit(id, smmu->asids);
        mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
        return false;
}
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
        struct tegra_smmu_as *as;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        as = kzalloc(sizeof(*as), GFP_KERNEL);
        if (!as)
                return NULL;

        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
        if (!as->pd) {
                kfree(as);
                return NULL;
        }

        as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
        if (!as->count) {
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
        if (!as->pts) {
                kfree(as->count);
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        /* setup aperture */
        as->domain.geometry.aperture_start = 0;
        as->domain.geometry.aperture_end = 0xffffffff;
        as->domain.geometry.force_aperture = true;

        return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);

        /* TODO: free page directory and page tables */

        kfree(as);
}
static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
        const struct tegra_smmu_swgroup *group = NULL;
        unsigned int i;

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                if (smmu->soc->swgroups[i].swgroup == swgroup) {
                        group = &smmu->soc->swgroups[i];
                        break;
                }
        }

        return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
                              unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value |= BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value |= SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
                               unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value &= ~SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value &= ~BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }
}
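
/*
 * An address space is prepared on first use: the page directory is mapped
 * for DMA, an ASID is allocated, and SMMU_PTB_ASID/SMMU_PTB_DATA are
 * programmed to point the hardware at the directory. use_count tracks how
 * many attached swgroups share the address space; the last unprepare
 * releases the ASID and the DMA mapping again.
 */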
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
{
        u32 value;
        int err;

        if (as->use_count > 0) {
                as->use_count++;
                return 0;
        }

        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(smmu->dev, as->pd_dma))
                return -ENOMEM;

        /* We can't handle 64-bit DMA addresses */
        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
                err = -ENOMEM;
                goto err_unmap;
        }

        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
                goto err_unmap;

        smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
        value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);

        as->smmu = smmu;
        as->use_count++;

        return 0;

err_unmap:
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
        return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
{
        if (--as->use_count > 0)
                return;

        tegra_smmu_free_asid(smmu, as->id);

        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

        as->smmu = NULL;
}
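
/*
 * Attach and detach walk the device's "iommus" phandles; for every entry
 * that references this SMMU, the corresponding swgroup is enabled for (or
 * disabled from) the domain's ASID.
 */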
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct tegra_smmu *smmu = dev->archdata.iommu;
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int index = 0;
        int err = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        continue;
                }

                of_node_put(args.np);

                err = tegra_smmu_as_prepare(smmu, as);
                if (err < 0)
                        return err;

                tegra_smmu_enable(smmu, swgroup, as->id);
                index++;
        }

        if (index == 0)
                return -ENODEV;

        return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = as->smmu;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        continue;
                }

                of_node_put(args.np);

                tegra_smmu_disable(smmu, swgroup, as->id);
                tegra_smmu_as_unprepare(smmu, as);
                index++;
        }
}
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
                               u32 value)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
        u32 *pd = page_address(as->pd);
        unsigned long offset = pd_index * sizeof(*pd);

        /* Set the page directory entry first */
        pd[pd_index] = value;

        /* Then flush the page directory entry from caches */
        dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
                                         sizeof(*pd), DMA_TO_DEVICE);

        /* And flush the iommu */
        smmu_flush_ptc(smmu, as->pd_dma, offset);
        smmu_flush_tlb_section(smmu, as->id, iova);
        smmu_flush(smmu);
}
static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
        u32 *pt = page_address(pt_page);

        return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
                                  dma_addr_t *dmap)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
        struct page *pt_page;
        u32 *pd;

        pt_page = as->pts[pd_index];
        if (!pt_page)
                return NULL;

        pd = page_address(as->pd);
        *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

        return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
                       dma_addr_t *dmap)
{
        unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;

        if (!as->pts[pde]) {
                struct page *page;
                dma_addr_t dma;

                page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
                if (!page)
                        return NULL;

                dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
                                   DMA_TO_DEVICE);
                if (dma_mapping_error(smmu->dev, dma)) {
                        __free_page(page);
                        return NULL;
                }

                if (!smmu_dma_addr_valid(smmu, dma)) {
                        dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
                                       DMA_TO_DEVICE);
                        __free_page(page);
                        return NULL;
                }

                as->pts[pde] = page;

                tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
                                                              SMMU_PDE_NEXT));

                *dmap = dma;
        } else {
                u32 *pd = page_address(as->pd);

                *dmap = smmu_pde_to_dma(smmu, pd[pde]);
        }

        return tegra_smmu_pte_offset(as->pts[pde], iova);
}
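
/*
 * as->count[] holds one use count per page directory entry: it counts the
 * number of valid PTEs in the corresponding page table so that the table
 * page can be returned to the system once its last mapping is gone.
 */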
static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pd_index = iova_pd_index(iova);

        as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pde = iova_pd_index(iova);
        struct page *page = as->pts[pde];

        /*
         * When no entries in this page table are used anymore, return the
         * memory page to the system.
         */
        if (--as->count[pde] == 0) {
                struct tegra_smmu *smmu = as->smmu;
                u32 *pd = page_address(as->pd);
                dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

                tegra_smmu_set_pde(as, iova, 0);

                dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
                __free_page(page);
                as->pts[pde] = NULL;
        }
}
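
/*
 * Writing a PTE follows the same pattern as writing a PDE: update the entry
 * in memory, sync the 4-byte entry to the device, then invalidate the page
 * table cache and the TLB entries covering the IOVA.
 */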
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
                               u32 *pte, dma_addr_t pte_dma, u32 val)
{
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset = offset_in_page(pte);

        *pte = val;

        dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
                                         4, DMA_TO_DEVICE);
        smmu_flush_ptc(smmu, pte_dma, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        u32 *pte;

        pte = as_get_pte(as, iova, &pte_dma);
        if (!pte)
                return -ENOMEM;

        /* If we aren't overwriting a pre-existing entry, increment use */
        if (*pte == 0)
                tegra_smmu_pte_get_use(as, iova);

        tegra_smmu_set_pte(as, iova, pte, pte_dma,
                           __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

        return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
        tegra_smmu_pte_put_use(as, iova);

        return size;
}
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long pfn;
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        pfn = *pte & as->smmu->pfn_mask;

        return PFN_PHYS(pfn);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
        struct platform_device *pdev;
        struct tegra_mc *mc;

        pdev = of_find_device_by_node(np);
        if (!pdev)
                return NULL;

        mc = platform_get_drvdata(pdev);
        if (!mc)
                return NULL;

        return mc->smmu;
}
static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
                                struct of_phandle_args *args)
{
        const struct iommu_ops *ops = smmu->iommu.ops;
        int err;

        err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
        if (err < 0) {
                dev_err(dev, "failed to initialize fwspec: %d\n", err);
                return err;
        }

        err = ops->of_xlate(dev, args);
        if (err < 0) {
                dev_err(dev, "failed to parse SW group ID: %d\n", err);
                iommu_fwspec_free(dev);
                return err;
        }

        return 0;
}
static int tegra_smmu_add_device(struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = NULL;
        struct iommu_group *group;
        struct of_phandle_args args;
        unsigned int index = 0;
        int err;

        while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                          &args) == 0) {
                smmu = tegra_smmu_find(args.np);
                if (smmu) {
                        err = tegra_smmu_configure(smmu, dev, &args);
                        of_node_put(args.np);

                        if (err < 0)
                                return err;

                        /*
                         * Only a single IOMMU master interface is currently
                         * supported by the Linux kernel, so abort after the
                         * first match.
                         */
                        dev->archdata.iommu = smmu;

                        iommu_device_link(&smmu->iommu, dev);

                        break;
                }

                of_node_put(args.np);
                index++;
        }

        if (!smmu)
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
        struct tegra_smmu *smmu = dev->archdata.iommu;

        if (smmu)
                iommu_device_unlink(&smmu->iommu, dev);

        dev->archdata.iommu = NULL;

        iommu_group_remove_device(dev);
}
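
/*
 * The per-SoC data can declare that several swgroups belong to the same
 * group; tegra_smmu_group_get() lazily allocates one iommu_group per such
 * SoC group and returns that same group for all of its swgroups.
 */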
static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
        unsigned int i, j;

        for (i = 0; i < smmu->soc->num_groups; i++)
                for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
                        if (smmu->soc->groups[i].swgroups[j] == swgroup)
                                return &smmu->soc->groups[i];

        return NULL;
}

static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
                                                unsigned int swgroup)
{
        const struct tegra_smmu_group_soc *soc;
        struct tegra_smmu_group *group;

        soc = tegra_smmu_find_group(smmu, swgroup);
        if (!soc)
                return NULL;

        mutex_lock(&smmu->lock);

        list_for_each_entry(group, &smmu->groups, list)
                if (group->soc == soc) {
                        mutex_unlock(&smmu->lock);
                        return group->group;
                }

        group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
        if (!group) {
                mutex_unlock(&smmu->lock);
                return NULL;
        }

        INIT_LIST_HEAD(&group->list);
        group->soc = soc;

        group->group = iommu_group_alloc();
        if (IS_ERR(group->group)) {
                devm_kfree(smmu->dev, group);
                mutex_unlock(&smmu->lock);
                return NULL;
        }

        list_add_tail(&group->list, &smmu->groups);
        mutex_unlock(&smmu->lock);

        return group->group;
}
static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct tegra_smmu *smmu = dev->archdata.iommu;
        struct iommu_group *group;

        group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
        if (!group)
                group = generic_device_group(dev);

        return group;
}

static int tegra_smmu_of_xlate(struct device *dev,
                               struct of_phandle_args *args)
{
        u32 id = args->args[0];

        return iommu_fwspec_add_ids(dev, &id, 1);
}
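
/*
 * pgsize_bitmap only advertises 4 KiB pages, so the IOMMU core breaks map
 * and unmap requests into page-sized chunks before calling into this driver.
 */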
static const struct iommu_ops tegra_smmu_ops = {
        .capable = tegra_smmu_capable,
        .domain_alloc = tegra_smmu_domain_alloc,
        .domain_free = tegra_smmu_domain_free,
        .attach_dev = tegra_smmu_attach_dev,
        .detach_dev = tegra_smmu_detach_dev,
        .add_device = tegra_smmu_add_device,
        .remove_device = tegra_smmu_remove_device,
        .device_group = tegra_smmu_device_group,
        .map = tegra_smmu_map,
        .unmap = tegra_smmu_unmap,
        .iova_to_phys = tegra_smmu_iova_to_phys,
        .of_xlate = tegra_smmu_of_xlate,
        .pgsize_bitmap = SZ_4K,
};
static void tegra_smmu_ahb_enable(void)
{
        static const struct of_device_id ahb_match[] = {
                { .compatible = "nvidia,tegra30-ahb", },
                { }
        };
        struct device_node *ahb;

        ahb = of_find_matching_node(NULL, ahb_match);
        if (ahb) {
                tegra_ahb_enable_smmu(ahb);
                of_node_put(ahb);
        }
}
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_printf(s, "swgroup enabled ASID\n");
        seq_printf(s, "------------------------\n");

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
                const char *status;
                unsigned int asid;

                value = smmu_readl(smmu, group->reg);

                if (value & SMMU_ASID_ENABLE)
                        status = "yes";
                else
                        status = "no";

                asid = value & SMMU_ASID_MASK;

                seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
                           asid);
        }

        return 0;
}

static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
        return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
        .open = tegra_smmu_swgroups_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_printf(s, "client enabled\n");
        seq_printf(s, "--------------------\n");

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];
                const char *status;

                value = smmu_readl(smmu, client->smmu.reg);

                if (value & BIT(client->smmu.bit))
                        status = "yes";
                else
                        status = "no";

                seq_printf(s, "%-12s %s\n", client->name, status);
        }

        return 0;
}

static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
        return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
        .open = tegra_smmu_clients_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
        smmu->debugfs = debugfs_create_dir("smmu", NULL);
        if (!smmu->debugfs)
                return;

        debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_swgroups_fops);
        debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
        debugfs_remove_recursive(smmu->debugfs);
}
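
/*
 * The SMMU shares its register space with the memory controller
 * (smmu->regs = mc->regs below) and is instantiated via
 * tegra_smmu_probe()/tegra_smmu_remove() rather than as a separate platform
 * driver. Probing programs the PTC and TLB configuration, enables the SMMU,
 * and registers it with the IOMMU core and the platform bus.
 */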
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
                                    const struct tegra_smmu_soc *soc,
                                    struct tegra_mc *mc)
{
        struct tegra_smmu *smmu;
        size_t size;
        u32 value;
        int err;

        /* This can happen on Tegra20 which doesn't have an SMMU */
        if (!soc)
                return NULL;

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu)
                return ERR_PTR(-ENOMEM);

        /*
         * This is a bit of a hack. Ideally we'd want to simply return this
         * value. However the IOMMU registration process will attempt to add
         * all devices to the IOMMU when bus_set_iommu() is called. In order
         * not to rely on global variables to track the IOMMU instance, we
         * set it here so that it can be looked up from the .add_device()
         * callback via the IOMMU device's .drvdata field.
         */
        mc->smmu = smmu;

        size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

        smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
        if (!smmu->asids)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&smmu->groups);
        mutex_init(&smmu->lock);

        smmu->regs = mc->regs;
        smmu->soc = soc;
        smmu->dev = dev;
        smmu->mc = mc;

        smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                mc->soc->num_address_bits, smmu->pfn_mask);
        smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
        dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
                smmu->tlb_mask);

        value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

        if (soc->supports_request_limit)
                value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

        smmu_writel(smmu, value, SMMU_PTC_CONFIG);

        value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
                SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

        if (soc->supports_round_robin_arbitration)
                value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

        smmu_writel(smmu, value, SMMU_TLB_CONFIG);

        smmu_flush_ptc_all(smmu);
        smmu_flush_tlb(smmu);
        smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
        smmu_flush(smmu);

        tegra_smmu_ahb_enable();

        err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
        if (err)
                return ERR_PTR(err);

        iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
        iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

        err = iommu_device_register(&smmu->iommu);
        if (err) {
                iommu_device_sysfs_remove(&smmu->iommu);
                return ERR_PTR(err);
        }

        err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
        if (err < 0) {
                iommu_device_unregister(&smmu->iommu);
                iommu_device_sysfs_remove(&smmu->iommu);
                return ERR_PTR(err);
        }

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_init(smmu);

        return smmu;
}
void tegra_smmu_remove(struct tegra_smmu *smmu)
{
        iommu_device_unregister(&smmu->iommu);
        iommu_device_sysfs_remove(&smmu->iommu);

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_exit(smmu);
}