vmmgp100.c

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <subdev/fb.h>
#include <subdev/ltc.h>

#include <nvif/ifc00d.h>
#include <nvif/unpack.h>
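
/*
 * Leaf PTE writer: combines the PTE flags pre-computed by gp100_vmm_valid()
 * with the (4-bit right-shifted) physical address and writes the requested
 * run of consecutive 64-bit entries, advancing by map->next (address and,
 * for compressed mappings, comptag index) per page.
 */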
static inline void
gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
        u64 data = (addr >> 4) | map->type;

        map->type += ptes * map->ctag;

        while (ptes--) {
                VMM_WO064(pt, vmm, ptei++ * 8, data);
                data += map->next;
        }
}

static void
gp100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}
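
/*
 * When the VMM page size equals the CPU page size, every dma_addr_t in
 * map->dma corresponds to exactly one PTE, so the entries can be written
 * directly here instead of going through the generic DMA map iterator.
 */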
static void
gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        if (map->page->shift == PAGE_SHIFT) {
                VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
                nvkm_kmap(pt->memory);
                while (ptes--) {
                        const u64 data = (*map->dma++ >> 4) | map->type;
                        VMM_WO064(pt, vmm, ptei++ * 8, data);
                        map->type += map->ctag;
                }
                nvkm_done(pt->memory);
                return;
        }

        VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
                     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        /* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
        VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(3) /* VOL. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_spt = {
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gp100_vmm_pgt_sparse,
        .mem = gp100_vmm_pgt_mem,
        .dma = gp100_vmm_pgt_dma,
        .sgl = gp100_vmm_pgt_sgl,
};

static void
gp100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
                      struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        /* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
        VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(5) /* PRIV. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_lpt = {
        .invalid = gp100_vmm_lpt_invalid,
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gp100_vmm_pgt_sparse,
        .mem = gp100_vmm_pgt_mem,
};
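
/*
 * PD0 entries are 128 bits wide and hold two page-table pointers (see
 * gp100_vmm_pd0_pde() below).  2MiB mappings, made directly at this level,
 * are written as PTEs into the low 64 bits with the high half left clear.
 */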
static inline void
gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
        u64 data = (addr >> 4) | map->type;

        map->type += ptes * map->ctag;

        while (ptes--) {
                VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
                data += map->next;
        }
}

static void
gp100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
}
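
/*
 * Encode a page-table pointer into a PDE: the target aperture (VRAM, host,
 * or non-coherent host memory) goes into bits 2:1, host memory is also
 * marked volatile, and the table address is stored right-shifted by 4.
 */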
static inline bool
gp100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
{
        switch (nvkm_memory_target(pt->memory)) {
        case NVKM_MEM_TARGET_VRAM: *data |= 1ULL << 1; break;
        case NVKM_MEM_TARGET_HOST: *data |= 2ULL << 1;
                *data |= BIT_ULL(3); /* VOL. */
                break;
        case NVKM_MEM_TARGET_NCOH: *data |= 3ULL << 1; break;
        default:
                WARN_ON(1);
                return false;
        }
        *data |= pt->addr >> 4;
        return true;
}

static void
gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
        struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
        struct nvkm_mmu_pt *pd = pgd->pt[0];
        u64 data[2] = {};

        if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
                return;
        if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
                return;

        nvkm_kmap(pd->memory);
        VMM_WO128(pd, vmm, pdei * 0x10, data[0], data[1]);
        nvkm_done(pd->memory);
}

static void
gp100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
                     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
        /* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
        VMM_FO128(pt, vmm, pdei * 0x10, BIT_ULL(3) /* VOL_BIG. */, 0ULL, pdes);
}

static void
gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
                    struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
        VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd0 = {
        .unmap = gp100_vmm_pd0_unmap,
        .sparse = gp100_vmm_pd0_sparse,
        .pde = gp100_vmm_pd0_pde,
        .mem = gp100_vmm_pd0_mem,
};

static void
gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
        struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
        struct nvkm_mmu_pt *pd = pgd->pt[0];
        u64 data = 0;

        if (!gp100_vmm_pde(pgt->pt[0], &data))
                return;

        nvkm_kmap(pd->memory);
        VMM_WO064(pd, vmm, pdei * 8, data);
        nvkm_done(pd->memory);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd1 = {
        .unmap = gf100_vmm_pgt_unmap,
        .sparse = gp100_vmm_pgt_sparse,
        .pde = gp100_vmm_pd1_pde,
};
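
/*
 * Page-table layouts, listed leaf to root: a leaf table, the dual-entry
 * PD0, and three upper page-directory levels indexing 9, 9 and 2 bits of
 * the 49-bit virtual address.  gp100_vmm_desc_16 uses a 32-entry large-page
 * table (64KiB pages) as its leaf, gp100_vmm_desc_12 a 512-entry small-page
 * table (4KiB pages); the second field is the number of VA bits indexed at
 * each level, the third the entry size in bytes.
 */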
const struct nvkm_vmm_desc
gp100_vmm_desc_16[] = {
        { LPT, 5,  8, 0x0100, &gp100_vmm_desc_lpt },
        { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
        {}
};

const struct nvkm_vmm_desc
gp100_vmm_desc_12[] = {
        { SPT, 9,  8, 0x1000, &gp100_vmm_desc_spt },
        { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
        { PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
        {}
};
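
/*
 * Build the non-address portion of the PTE for this mapping: bit 0 VALID,
 * bits 2:1 aperture, bit 3 VOL, bit 5 PRIV, bit 6 RO, bits 63:56 the
 * storage kind, and, for compressible kinds, the comptag line starting at
 * bit 36.  The per-page address is OR'd in later by the writers above.
 */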
int
gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
                struct nvkm_vmm_map *map)
{
        const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
        const struct nvkm_vmm_page *page = map->page;
        union {
                struct gp100_vmm_map_vn vn;
                struct gp100_vmm_map_v0 v0;
        } *args = argv;
        struct nvkm_device *device = vmm->mmu->subdev.device;
        struct nvkm_memory *memory = map->memory;
        u8  kind, priv, ro, vol;
        int kindn, aper, ret = -ENOSYS;
        const u8 *kindm;

        map->next = (1ULL << page->shift) >> 4;
        map->type = 0;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                vol  = !!args->v0.vol;
                ro   = !!args->v0.ro;
                priv = !!args->v0.priv;
                kind =   args->v0.kind;
        } else
        if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
                vol  = target == NVKM_MEM_TARGET_HOST;
                ro   = 0;
                priv = 0;
                kind = 0x00;
        } else {
                VMM_DEBUG(vmm, "args");
                return ret;
        }

        aper = vmm->func->aper(target);
        if (WARN_ON(aper < 0))
                return aper;

        kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
        if (kind >= kindn || kindm[kind] == 0xff) {
                VMM_DEBUG(vmm, "kind %02x", kind);
                return -EINVAL;
        }

        if (kindm[kind] != kind) {
                u64 tags = nvkm_memory_size(memory) >> 16;
                if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
                        VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
                        return -EINVAL;
                }

                ret = nvkm_memory_tags_get(memory, device, tags,
                                           nvkm_ltc_tags_clear,
                                           &map->tags);
                if (ret) {
                        VMM_DEBUG(vmm, "comp %d", ret);
                        return ret;
                }

                if (map->tags->mn) {
                        tags = map->tags->mn->offset + (map->offset >> 16);
                        map->ctag |= ((1ULL << page->shift) >> 16) << 36;
                        map->type |= tags << 36;
                        map->next |= map->ctag;
                } else {
                        kind = kindm[kind];
                }
        }

        map->type |= BIT(0);
        map->type |= (u64)aper << 1;
        map->type |= (u64) vol << 3;
        map->type |= (u64)priv << 5;
        map->type |= (u64)  ro << 6;
        map->type |= (u64)kind << 56;
        return 0;
}
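
/*
 * The invalidate's cache-level field is anchored at CACHE_LEVEL_UP_TO_PDE3
 * for Pascal's five-level layout, so the VMM's depth is converted as
 * 5 - depth before being handed to the shared gf100 flush helper.
 */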
void
gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
        gf100_vmm_flush_(vmm, 5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth);
}

int
gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
        const u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11); /* 64KiB */
        return gf100_vmm_join_(vmm, inst, base);
}
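
/*
 * Supported page shifts.  The 47/38/29-bit entries exist only so that
 * sparse ranges can be tracked at the upper directory levels; the real
 * mapping sizes are 2MiB and 64KiB (compression-capable) and 4KiB (the
 * only size that may also target host memory).
 */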
static const struct nvkm_vmm_func
gp100_vmm = {
        .join = gp100_vmm_join,
        .part = gf100_vmm_part,
        .aper = gf100_vmm_aper,
        .valid = gp100_vmm_valid,
        .flush = gp100_vmm_flush,
        .page = {
                { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
                { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
                { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
                { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
                { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
                { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
                {}
        }
};

int
gp100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
              struct lock_class_key *key, const char *name,
              struct nvkm_vmm **pvmm)
{
        return nv04_vmm_new_(&gp100_vmm, mmu, 0, addr, size,
                             argv, argc, key, name, pvmm);
}