privcmd.c

/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
                 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
                   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
                 "Maximum size of a dm_op hypercall buffer");

struct privcmd_data {
        domid_t domid;
};

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages);

static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct privcmd_hypercall hypercall;
        long ret;

        /* Disallow arbitrary hypercalls if restricted */
        if (data->domid != DOMID_INVALID)
                return -EPERM;

        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;

        xen_preemptible_hcall_begin();
        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);
        xen_preemptible_hcall_end();

        return ret;
}
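
/*
 * Illustrative userspace sketch (not part of this file), showing how a
 * toolstack might drive IOCTL_PRIVCMD_HYPERCALL on an unrestricted fd.
 * The hypercall number and argument are real Xen constants, but the
 * snippet as a whole is only a hedged example:
 *
 *      int fd = open("/dev/xen/privcmd", O_RDWR);
 *      struct privcmd_hypercall call = {
 *              .op  = __HYPERVISOR_xen_version,
 *              .arg = { XENVER_version, 0, 0, 0, 0 },
 *      };
 *      long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */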
static void free_page_list(struct list_head *pages)
{
        struct page *p, *n;

        list_for_each_entry_safe(p, n, pages, lru)
                __free_page(p);

        INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
                        const void __user *data)
{
        unsigned pageidx;
        void *pagedata;
        int ret;

        if (size > PAGE_SIZE)
                return 0;

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* quiet, gcc */
        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        ret = -ENOMEM;
                        if (page == NULL)
                                goto fail;

                        pagedata = page_address(page);

                        list_add_tail(&page->lru, pagelist);
                        pageidx = 0;
                }

                ret = -EFAULT;
                if (copy_from_user(pagedata + pageidx, data, size))
                        goto fail;

                data += size;
                pageidx += size;
        }

        ret = 0;

fail:
        return ret;
}
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
                          struct list_head *pos,
                          int (*fn)(void *data, void *state),
                          void *state)
{
        void *pagedata;
        unsigned pageidx;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* hush, gcc */

        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page;

                        pos = pos->next;
                        page = list_entry(pos, struct page, lru);
                        pagedata = page_address(page);
                        pageidx = 0;
                }

                ret = (*fn)(pagedata + pageidx, state);
                if (ret)
                        break;
                pageidx += size;
        }

        return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
                                struct list_head *pos,
                                int (*fn)(void *data, int nr, void *state),
                                void *state)
{
        void *pagedata;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        while (nelem) {
                int nr = (PAGE_SIZE/size);
                struct page *page;

                if (nr > nelem)
                        nr = nelem;
                pos = pos->next;
                page = list_entry(pos, struct page, lru);
                pagedata = page_address(page);
                ret = (*fn)(pagedata, nr, state);
                if (ret)
                        break;
                nelem -= nr;
        }

        return ret;
}
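
/*
 * Note: gather_array() and the traverse_pages*() helpers are used as a
 * pair: an ioctl first copies a user array into a page list, then walks
 * it.  A minimal (hypothetical) caller would look like:
 *
 *      LIST_HEAD(pagelist);
 *      ret = gather_array(&pagelist, nelem, sizeof(elem_t), uarr);
 *      if (!ret)
 *              ret = traverse_pages(nelem, sizeof(elem_t), &pagelist,
 *                                   process_one, &state);
 *      free_page_list(&pagelist);
 *
 * where elem_t, uarr, process_one and state are placeholders.
 */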
struct mmap_gfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
        struct privcmd_mmap_entry *msg = data;
        struct mmap_gfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;

        /* Do not allow range to wrap the address space. */
        if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
            ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
                return -EINVAL;

        /* Range chunks must be contiguous in va space. */
        if ((msg->va != st->va) ||
            ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = xen_remap_domain_gfn_range(vma,
                                        msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
                                        st->domain, NULL);
        if (rc < 0)
                return rc;

        st->va += msg->npages << PAGE_SHIFT;

        return 0;
}
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
        struct mmap_gfn_state state;

        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
                return -EPERM;

        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);

        if (rc || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);

        {
                struct page *page = list_first_entry(&pagelist,
                                                     struct page, lru);
                struct privcmd_mmap_entry *msg = page_address(page);

                vma = find_vma(mm, msg->va);
                rc = -EINVAL;

                if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
                        goto out_up;
                vma->vm_private_data = PRIV_VMA_LOCKED;
        }

        state.va = vma->vm_start;
        state.vma = vma;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
                            mmap_gfn_range, &state);

out_up:
        up_write(&mm->mmap_sem);

out:
        free_page_list(&pagelist);

        return rc;
}
struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        int index;
        /* A tristate:
         *      0 for no errors
         *      1 if at least one error has happened (and no
         *        -ENOENT errors have happened)
         *      -ENOENT if at least 1 -ENOENT has happened.
         */
        int global_error;
        int version;

        /* User-space gfn array to store errors in the second pass for V1. */
        xen_pfn_t __user *user_gfn;
        /* User-space int array to store errors in the second pass for V2. */
        int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
        xen_pfn_t *gfnp = data;
        struct mmap_batch_state *st = state;
        struct vm_area_struct *vma = st->vma;
        struct page **pages = vma->vm_private_data;
        struct page **cur_pages = NULL;
        int ret;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                cur_pages = &pages[st->index];

        BUG_ON(nr < 0);
        ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
                                         (int *)gfnp, st->vma->vm_page_prot,
                                         st->domain, cur_pages);

        /* Adjust the global_error? */
        if (ret != nr) {
                if (ret == -ENOENT)
                        st->global_error = -ENOENT;
                else {
                        /* Record that at least one error has happened. */
                        if (st->global_error == 0)
                                st->global_error = 1;
                }
        }
        st->va += XEN_PAGE_SIZE * nr;
        st->index += nr / XEN_PFN_PER_PAGE;

        return 0;
}
static int mmap_return_error(int err, struct mmap_batch_state *st)
{
        int ret;

        if (st->version == 1) {
                if (err) {
                        xen_pfn_t gfn;

                        ret = get_user(gfn, st->user_gfn);
                        if (ret < 0)
                                return ret;
                        /*
                         * V1 encodes the error codes in the top nibble of the
                         * 32-bit gfn (with its known limitations vis-a-vis
                         * 64-bit callers).
                         */
                        gfn |= (err == -ENOENT) ?
                                PRIVCMD_MMAPBATCH_PAGED_ERROR :
                                PRIVCMD_MMAPBATCH_MFN_ERROR;
                        return __put_user(gfn, st->user_gfn++);
                } else
                        st->user_gfn++;
        } else { /* st->version == 2 */
                if (err)
                        return __put_user(err, st->user_err++);
                else
                        st->user_err++;
        }

        return 0;
}

static int mmap_return_errors(void *data, int nr, void *state)
{
        struct mmap_batch_state *st = state;
        int *errs = data;
        int i;
        int ret;

        for (i = 0; i < nr; i++) {
                ret = mmap_return_error(errs[i], st);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
        int rc;
        struct page **pages;

        pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (pages == NULL)
                return -ENOMEM;
        rc = alloc_xenballooned_pages(numpgs, pages);
        if (rc != 0) {
                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                        numpgs, rc);
                kfree(pages);
                return -ENOMEM;
        }
        BUG_ON(vma->vm_private_data != NULL);
        vma->vm_private_data = pages;

        return 0;
}
static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(
        struct file *file, void __user *udata, int version)
{
        struct privcmd_data *data = file->private_data;
        int ret;
        struct privcmd_mmapbatch_v2 m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
        struct mmap_batch_state state;

        switch (version) {
        case 1:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
                        return -EFAULT;
                /* Returns per-frame error in m.arr. */
                m.err = NULL;
                if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
                        return -EFAULT;
                break;
        case 2:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
                        return -EFAULT;
                /* Returns per-frame error code in m.err. */
                if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
                        return -EFAULT;
                break;
        default:
                return -EINVAL;
        }

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != m.dom)
                return -EPERM;

        nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;

        ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

        if (ret)
                goto out;
        if (list_empty(&pagelist)) {
                ret = -EINVAL;
                goto out;
        }

        if (version == 2) {
                /* Zero error array now to only copy back actual errors. */
                if (clear_user(m.err, sizeof(int) * m.num)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        down_write(&mm->mmap_sem);

        vma = find_vma(mm, m.addr);
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops) {
                ret = -EINVAL;
                goto out_unlock;
        }

        /*
         * Caller must either:
         *
         * Map the whole VMA range, which will also allocate all the
         * pages required for the auto_translated_physmap case.
         *
         * Or
         *
         * Map unmapped holes left from a previous map attempt (e.g.,
         * because those foreign frames were previously paged out).
         */
        if (vma->vm_private_data == NULL) {
                if (m.addr != vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (xen_feature(XENFEAT_auto_translated_physmap)) {
                        ret = alloc_empty_pages(vma, nr_pages);
                        if (ret < 0)
                                goto out_unlock;
                } else
                        vma->vm_private_data = PRIV_VMA_LOCKED;
        } else {
                if (m.addr < vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
        }

        state.domain = m.dom;
        state.vma = vma;
        state.va = m.addr;
        state.index = 0;
        state.global_error = 0;
        state.version = version;

        BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
        /* mmap_batch_fn guarantees ret == 0 */
        BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                    &pagelist, mmap_batch_fn, &state));

        up_write(&mm->mmap_sem);

        if (state.global_error) {
                /* Write back errors in second pass. */
                state.user_gfn = (xen_pfn_t *)m.arr;
                state.user_err = m.err;
                ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                           &pagelist, mmap_return_errors,
                                           &state);
        } else
                ret = 0;

        /* If we have not had any EFAULT-like global errors then set the global
         * error to -ENOENT if necessary. */
        if ((ret == 0) && (state.global_error == -ENOENT))
                ret = -ENOENT;

out:
        free_page_list(&pagelist);
        return ret;

out_unlock:
        up_write(&mm->mmap_sem);
        goto out;
}
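
/*
 * Illustrative userspace sketch (not part of this file): mapping one
 * foreign frame with IOCTL_PRIVCMD_MMAPBATCH_V2.  fd, domid and gfn are
 * placeholders; errs[0] receives the per-frame error, if any:
 *
 *      xen_pfn_t arr[1] = { gfn };
 *      int errs[1];
 *      void *addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, fd, 0);
 *      struct privcmd_mmapbatch_v2 m = {
 *              .num  = 1,
 *              .dom  = domid,
 *              .addr = (__u64)(uintptr_t)addr,
 *              .arr  = arr,
 *              .err  = errs,
 *      };
 *      int rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 */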
static int lock_pages(
        struct privcmd_dm_op_buf kbufs[], unsigned int num,
        struct page *pages[], unsigned int nr_pages)
{
        unsigned int i;

        for (i = 0; i < num; i++) {
                unsigned int requested;
                int pinned;

                requested = DIV_ROUND_UP(
                        offset_in_page(kbufs[i].uptr) + kbufs[i].size,
                        PAGE_SIZE);
                if (requested > nr_pages)
                        return -ENOSPC;

                pinned = get_user_pages_fast(
                        (unsigned long) kbufs[i].uptr,
                        requested, FOLL_WRITE, pages);
                if (pinned < 0)
                        return pinned;
                /*
                 * A short pin would let the hypercall touch unpinned
                 * memory; treat it as a fault.  The caller's
                 * unlock_pages() drops whatever was pinned so far.
                 */
                if (pinned != requested)
                        return -EFAULT;

                nr_pages -= pinned;
                pages += pinned;
        }

        return 0;
}
static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
        unsigned int i;

        if (!pages)
                return;

        for (i = 0; i < nr_pages; i++) {
                if (pages[i])
                        put_page(pages[i]);
        }
}
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct privcmd_dm_op kdata;
        struct privcmd_dm_op_buf *kbufs;
        unsigned int nr_pages = 0;
        struct page **pages = NULL;
        struct xen_dm_op_buf *xbufs = NULL;
        unsigned int i;
        long rc;

        if (copy_from_user(&kdata, udata, sizeof(kdata)))
                return -EFAULT;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
                return -EPERM;

        if (kdata.num == 0)
                return 0;

        if (kdata.num > privcmd_dm_op_max_num)
                return -E2BIG;

        kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
        if (!kbufs)
                return -ENOMEM;

        if (copy_from_user(kbufs, kdata.ubufs,
                           sizeof(*kbufs) * kdata.num)) {
                rc = -EFAULT;
                goto out;
        }

        for (i = 0; i < kdata.num; i++) {
                if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
                        rc = -E2BIG;
                        goto out;
                }

                if (!access_ok(VERIFY_WRITE, kbufs[i].uptr,
                               kbufs[i].size)) {
                        rc = -EFAULT;
                        goto out;
                }

                nr_pages += DIV_ROUND_UP(
                        offset_in_page(kbufs[i].uptr) + kbufs[i].size,
                        PAGE_SIZE);
        }

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages) {
                rc = -ENOMEM;
                goto out;
        }

        xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
        if (!xbufs) {
                rc = -ENOMEM;
                goto out;
        }

        rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
        if (rc)
                goto out;

        for (i = 0; i < kdata.num; i++) {
                set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
                xbufs[i].size = kbufs[i].size;
        }

        xen_preemptible_hcall_begin();
        rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
        xen_preemptible_hcall_end();

out:
        unlock_pages(pages, nr_pages);
        kfree(xbufs);
        kfree(pages);
        kfree(kbufs);

        return rc;
}
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        domid_t dom;

        if (copy_from_user(&dom, udata, sizeof(dom)))
                return -EFAULT;

        /* Set restriction to the specified domain, or check it matches */
        if (data->domid == DOMID_INVALID)
                data->domid = dom;
        else if (data->domid != dom)
                return -EINVAL;

        return 0;
}
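
/*
 * Illustrative userspace sketch (not part of this file): a toolstack can
 * restrict an fd to a single domain before handing it to a device model,
 * after which raw hypercalls and operations targeting other domains fail
 * with -EPERM on that fd:
 *
 *      domid_t domid = 7;      // placeholder
 *      if (ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &domid))
 *              perror("restrict");
 */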
struct remap_pfn {
        struct mm_struct *mm;
        struct page **pages;
        pgprot_t prot;
        unsigned long i;
};

static int remap_pfn_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
                        void *data)
{
        struct remap_pfn *r = data;
        struct page *page = r->pages[r->i];
        pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));

        set_pte_at(r->mm, addr, ptep, pte);
        r->i++;

        return 0;
}
static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct privcmd_mmap_resource kdata;
        xen_pfn_t *pfns = NULL;
        struct xen_mem_acquire_resource xdata;
        int rc;

        if (copy_from_user(&kdata, udata, sizeof(kdata)))
                return -EFAULT;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
                return -EPERM;

        down_write(&mm->mmap_sem);

        vma = find_vma(mm, kdata.addr);
        if (!vma || vma->vm_ops != &privcmd_vm_ops) {
                rc = -EINVAL;
                goto out;
        }

        pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
        if (!pfns) {
                rc = -ENOMEM;
                goto out;
        }

        if (xen_feature(XENFEAT_auto_translated_physmap)) {
                unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
                struct page **pages;
                unsigned int i;

                rc = alloc_empty_pages(vma, nr);
                if (rc < 0)
                        goto out;

                pages = vma->vm_private_data;
                for (i = 0; i < kdata.num; i++) {
                        xen_pfn_t pfn =
                                page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

                        pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
                }
        } else
                vma->vm_private_data = PRIV_VMA_LOCKED;

        memset(&xdata, 0, sizeof(xdata));
        xdata.domid = kdata.dom;
        xdata.type = kdata.type;
        xdata.id = kdata.id;
        xdata.frame = kdata.idx;
        xdata.nr_frames = kdata.num;
        set_xen_guest_handle(xdata.frame_list, pfns);

        xen_preemptible_hcall_begin();
        rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
        xen_preemptible_hcall_end();

        if (rc)
                goto out;

        if (xen_feature(XENFEAT_auto_translated_physmap)) {
                struct remap_pfn r = {
                        .mm = vma->vm_mm,
                        .pages = vma->vm_private_data,
                        .prot = vma->vm_page_prot,
                };

                rc = apply_to_page_range(r.mm, kdata.addr,
                                         kdata.num << PAGE_SHIFT,
                                         remap_pfn_fn, &r);
        } else {
                unsigned int domid =
                        (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
                        DOMID_SELF : kdata.dom;
                int num;

                num = xen_remap_domain_mfn_array(vma,
                                                 kdata.addr & PAGE_MASK,
                                                 pfns, kdata.num, (int *)pfns,
                                                 vma->vm_page_prot,
                                                 domid,
                                                 vma->vm_private_data);
                if (num < 0)
                        rc = num;
                else if (num != kdata.num) {
                        unsigned int i;

                        for (i = 0; i < num; i++) {
                                rc = pfns[i];
                                if (rc < 0)
                                        break;
                        }
                } else
                        rc = 0;
        }

out:
        up_write(&mm->mmap_sem);
        kfree(pfns);

        return rc;
}
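
/*
 * Illustrative userspace sketch (not part of this file): acquiring one
 * resource frame via IOCTL_PRIVCMD_MMAP_RESOURCE.  domid, ioserv_id and
 * addr (an mmap()ed range on fd, as in the MMAPBATCH sketch above) are
 * placeholders:
 *
 *      struct privcmd_mmap_resource r = {
 *              .dom  = domid,
 *              .type = XENMEM_resource_ioreq_server,
 *              .id   = ioserv_id,
 *              .idx  = 0,
 *              .num  = 1,
 *              .addr = (__u64)(uintptr_t)addr,
 *      };
 *      int rc = ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &r);
 */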
static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
{
        int ret = -ENOTTY;
        void __user *udata = (void __user *) data;

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                ret = privcmd_ioctl_hypercall(file, udata);
                break;

        case IOCTL_PRIVCMD_MMAP:
                ret = privcmd_ioctl_mmap(file, udata);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH:
                ret = privcmd_ioctl_mmap_batch(file, udata, 1);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH_V2:
                ret = privcmd_ioctl_mmap_batch(file, udata, 2);
                break;

        case IOCTL_PRIVCMD_DM_OP:
                ret = privcmd_ioctl_dm_op(file, udata);
                break;

        case IOCTL_PRIVCMD_RESTRICT:
                ret = privcmd_ioctl_restrict(file, udata);
                break;

        case IOCTL_PRIVCMD_MMAP_RESOURCE:
                ret = privcmd_ioctl_mmap_resource(file, udata);
                break;

        default:
                break;
        }

        return ret;
}
static int privcmd_open(struct inode *ino, struct file *file)
{
        struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

        if (!data)
                return -ENOMEM;

        /* DOMID_INVALID implies no restriction */
        data->domid = DOMID_INVALID;

        file->private_data = data;
        return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
        struct privcmd_data *data = file->private_data;

        kfree(data);
        return 0;
}

static void privcmd_close(struct vm_area_struct *vma)
{
        struct page **pages = vma->vm_private_data;
        int numpgs = vma_pages(vma);
        int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
        int rc;

        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;

        rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
        if (rc == 0)
                free_xenballooned_pages(numpgs, pages);
        else
                pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
                        numpgs, rc);
        kfree(pages);
}
static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
               vmf->pgoff, (void *)vmf->address);

        return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
        .close = privcmd_close,
        .fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
                         VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;

        return 0;
}
/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until success.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
                        unsigned long addr, void *data)
{
        return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages)
{
        return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
                                   is_mapped_fn, NULL) != 0;
}
const struct file_operations xen_privcmd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = privcmd_ioctl,
        .open = privcmd_open,
        .release = privcmd_release,
        .mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "xen/privcmd",
        .fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        err = misc_register(&privcmd_dev);
        if (err != 0) {
                pr_err("Could not register Xen privcmd device\n");
                return err;
        }

        err = misc_register(&xen_privcmdbuf_dev);
        if (err != 0) {
                pr_err("Could not register Xen hypercall-buf device\n");
                misc_deregister(&privcmd_dev);
                return err;
        }

        return 0;
}

static void __exit privcmd_exit(void)
{
        misc_deregister(&privcmd_dev);
        misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);