/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "uvmm.h"
#include "umem.h"
#include "ummu.h"

#include <core/client.h>
#include <core/memory.h>

#include <nvif/if000c.h>
#include <nvif/unpack.h>

static const struct nvkm_object_func nvkm_uvmm;
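
/* Resolve a client object handle to the VMM backing an nvkm_uvmm object. */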
struct nvkm_vmm *
nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
{
	struct nvkm_object *object;

	object = nvkm_object_search(client, handle, &nvkm_uvmm);
	if (IS_ERR(object))
		return (void *)object;

	return nvkm_uvmm(object)->vmm;
}
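
/*
 * NVIF_VMM_V0_UNMAP: look up the VMA at the requested address and, if the
 * client is allowed to touch it and it is currently mapped, unmap it.
 */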
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_unmap_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	u64 addr;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	vma = nvkm_vmm_node_search(vmm, addr);
	if (ret = -ENOENT, !vma || vma->addr != addr) {
		VMM_DEBUG(vmm, "lookup %016llx: %016llx",
			  addr, vma ? vma->addr : ~0ULL);
		goto done;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto done;
	}

	if (ret = -EINVAL, !vma->memory) {
		VMM_DEBUG(vmm, "unmapped");
		goto done;
	}

	nvkm_vmm_unmap_locked(vmm, vma);
	ret = 0;
done:
	mutex_unlock(&vmm->mutex);
	return ret;
}
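
/*
 * NVIF_VMM_V0_MAP: map a client memory object into an allocated VMA.  If the
 * request covers only part of an unmapped VMA, the VMA is first split so that
 * exactly the requested range gets mapped.
 */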
static int
nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_map_v0 v0;
	} *args = argv;
	u64 addr, size, handle, offset;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	struct nvkm_memory *memory;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
		addr = args->v0.addr;
		size = args->v0.size;
		handle = args->v0.memory;
		offset = args->v0.offset;
	} else
		return ret;

	memory = nvkm_umem_search(client, handle);
	if (IS_ERR(memory)) {
		VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
		return PTR_ERR(memory);
	}

	mutex_lock(&vmm->mutex);
	if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
		VMM_DEBUG(vmm, "lookup %016llx", addr);
		goto fail;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto fail;
	}

	if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
		if (addr + size > vma->addr + vma->size || vma->memory ||
		    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
			VMM_DEBUG(vmm, "split %d %d %d "
				       "%016llx %016llx %016llx %016llx",
				  !!vma->memory, vma->refd, vma->mapref,
				  addr, size, vma->addr, (u64)vma->size);
			goto fail;
		}

		if (vma->addr != addr) {
			const u64 tail = vma->size + vma->addr - addr;
			if (ret = -ENOMEM, !(vma = nvkm_vma_tail(vma, tail)))
				goto fail;
			vma->part = true;
			nvkm_vmm_node_insert(vmm, vma);
		}

		if (vma->size != size) {
			const u64 tail = vma->size - size;
			struct nvkm_vma *tmp;
			if (ret = -ENOMEM, !(tmp = nvkm_vma_tail(vma, tail))) {
				nvkm_vmm_unmap_region(vmm, vma);
				goto fail;
			}
			tmp->part = true;
			nvkm_vmm_node_insert(vmm, tmp);
		}
	}
	vma->busy = true;
	mutex_unlock(&vmm->mutex);

	ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
	if (ret == 0) {
		/* Successful map will clear vma->busy. */
		nvkm_memory_unref(&memory);
		return 0;
	}

	mutex_lock(&vmm->mutex);
	vma->busy = false;
	nvkm_vmm_unmap_region(vmm, vma);
fail:
	mutex_unlock(&vmm->mutex);
	nvkm_memory_unref(&memory);
	return ret;
}
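
/* NVIF_VMM_V0_PUT: release a VMA previously allocated with NVIF_VMM_V0_GET. */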
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_put_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	u64 addr;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	vma = nvkm_vmm_node_search(vmm, args->v0.addr);
	if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
		VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
			  vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
		goto done;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto done;
	}

	nvkm_vmm_put_locked(vmm, vma);
	ret = 0;
done:
	mutex_unlock(&vmm->mutex);
	return ret;
}
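
/*
 * NVIF_VMM_V0_GET: allocate a region of address space from the VMM, with the
 * requested page size, alignment, and reference semantics.
 */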
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_get_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	bool getref, mapref, sparse;
	u8 page, align;
	u64 size;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
		mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
		sparse = args->v0.sparse;
		page = args->v0.page;
		align = args->v0.align;
		size = args->v0.size;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
				  page, align, size, &vma);
	mutex_unlock(&vmm->mutex);
	if (ret)
		return ret;

	args->v0.addr = vma->addr;
	vma->user = !client->super;
	return ret;
}
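
/*
 * NVIF_VMM_V0_PAGE: report the properties (shift, sparse/VRAM/host/comp
 * support) of one entry in the VMM's page size table.
 */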
static int
nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	union {
		struct nvif_vmm_page_v0 v0;
	} *args = argv;
	const struct nvkm_vmm_page *page;
	int ret = -ENOSYS;
	u8 type, index, nr;

	page = uvmm->vmm->func->page;
	for (nr = 0; page[nr].shift; nr++);

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		if ((index = args->v0.index) >= nr)
			return -EINVAL;
		type = page[index].type;
		args->v0.shift = page[index].shift;
		args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
		args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
		args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
		args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
	} else
		return -ENOSYS;

	return 0;
}
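
/* Dispatch NVIF VMM methods issued against the userspace-visible object. */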
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
	switch (mthd) {
	case NVIF_VMM_V0_PAGE  : return nvkm_uvmm_mthd_page  (uvmm, argv, argc);
	case NVIF_VMM_V0_GET   : return nvkm_uvmm_mthd_get   (uvmm, argv, argc);
	case NVIF_VMM_V0_PUT   : return nvkm_uvmm_mthd_put   (uvmm, argv, argc);
	case NVIF_VMM_V0_MAP   : return nvkm_uvmm_mthd_map   (uvmm, argv, argc);
	case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
	default:
		break;
	}
	return -EINVAL;
}

static void *
nvkm_uvmm_dtor(struct nvkm_object *object)
{
	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
	nvkm_vmm_unref(&uvmm->vmm);
	return uvmm;
}

static const struct nvkm_object_func
nvkm_uvmm = {
	.dtor = nvkm_uvmm_dtor,
	.mthd = nvkm_uvmm_mthd,
};
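
/*
 * Constructor for the userspace VMM object: either creates a new VMM for the
 * client, or takes a reference on the device's shared VMM if one exists.
 */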
int
nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
	      struct nvkm_object **pobject)
{
	struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
	const bool more = oclass->base.maxver >= 0;
	union {
		struct nvif_vmm_v0 v0;
	} *args = argv;
	const struct nvkm_vmm_page *page;
	struct nvkm_uvmm *uvmm;
	int ret = -ENOSYS;
	u64 addr, size;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
		addr = args->v0.addr;
		size = args->v0.size;
	} else
		return ret;

	if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
	*pobject = &uvmm->object;

	if (!mmu->vmm) {
		ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc,
					  NULL, "user", &uvmm->vmm);
		if (ret)
			return ret;

		uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
	} else {
		if (size)
			return -EINVAL;

		uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
	}

	page = uvmm->vmm->func->page;
	args->v0.page_nr = 0;
	while (page && (page++)->shift)
		args->v0.page_nr++;
	args->v0.addr = uvmm->vmm->start;
	args->v0.size = uvmm->vmm->limit;
	return 0;
}