mmu.c

#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);
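
/* Translate a kernel virtual address to the machine frame number backing it. */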
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
        xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

        return PFN_DOWN(maddr.maddr);
}
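
/*
 * Translate a kernel virtual address to a machine address.  Addresses in the
 * linear mapping use the fast p2m lookup; anything else requires walking the
 * page tables.
 */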
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;
        pte_t *pte;
        unsigned offset;

        /*
         * if the PFN is in the linear mapped vaddr range, we can just use
         * the (quick) virt_to_machine() p2m lookup
         */
        if (virt_addr_valid(vaddr))
                return virt_to_machine(vaddr);

        /* otherwise we have to do a (slower) full page-table walk */
        pte = lookup_address(address, &level);
        BUG_ON(pte == NULL);
        offset = address & ~PAGE_MASK;
        return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
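
/* Flush the TLB on all CPUs with a single MMUEXT_TLB_FLUSH_ALL multicall. */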
static noinline void xen_flush_tlb_all(void)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));

        op = mcs.args;
        op->cmd = MMUEXT_TLB_FLUSH_ALL;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}
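
/* Number of PTE updates handed to HYPERVISOR_mmu_update() in one batch. */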
#define REMAP_BATCH_SIZE 16
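
/*
 * State shared between do_remap_pfn() and the remap_area_pfn_pte_fn()
 * callback while a batch of MMU updates is being built.
 */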
struct remap_data {
        xen_pfn_t *pfn;
        bool contiguous;
        bool no_translate;
        pgprot_t prot;
        struct mmu_update *mmu_update;
};
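
/*
 * apply_to_page_range() callback: build the PTE for one page and queue the
 * corresponding MMU update in the current batch.
 */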
static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
                                 unsigned long addr, void *data)
{
        struct remap_data *rmd = data;
        pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));

        /*
         * If we have a contiguous range, just update the pfn itself,
         * else update pointer to be "next pfn".
         */
        if (rmd->contiguous)
                (*rmd->pfn)++;
        else
                rmd->pfn++;

        rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
        rmd->mmu_update->ptr |= rmd->no_translate ?
                MMU_PT_UPDATE_NO_TRANSLATE :
                MMU_NORMAL_PT_UPDATE;
        rmd->mmu_update->val = pte_val_ma(pte);
        rmd->mmu_update++;

        return 0;
}
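
/*
 * Map @nr foreign frames into @vma starting at @addr, issuing the PTE
 * updates to the hypervisor REMAP_BATCH_SIZE at a time.  With a NULL
 * @err_ptr the frames are contiguous starting at *@pfn; otherwise @pfn is
 * an array and per-frame errors are reported through @err_ptr.  Returns the
 * number of frames mapped, or a negative error code.
 */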
static int do_remap_pfn(struct vm_area_struct *vma,
                        unsigned long addr,
                        xen_pfn_t *pfn, int nr,
                        int *err_ptr, pgprot_t prot,
                        unsigned int domid,
                        bool no_translate,
                        struct page **pages)
{
        int err = 0;
        struct remap_data rmd;
        struct mmu_update mmu_update[REMAP_BATCH_SIZE];
        unsigned long range;
        int mapped = 0;

        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

        rmd.pfn = pfn;
        rmd.prot = prot;
        /*
         * We use err_ptr to indicate whether we are doing a contiguous
         * mapping or a discontiguous mapping.
         */
        rmd.contiguous = !err_ptr;
        rmd.no_translate = no_translate;

        while (nr) {
                int index = 0;
                int done = 0;
                int batch = min(REMAP_BATCH_SIZE, nr);
                int batch_left = batch;

                range = (unsigned long)batch << PAGE_SHIFT;

                rmd.mmu_update = mmu_update;
                err = apply_to_page_range(vma->vm_mm, addr, range,
                                          remap_area_pfn_pte_fn, &rmd);
                if (err)
                        goto out;

                /*
                 * We record the error for each page that gives an error, but
                 * continue mapping until the whole set is done.
                 */
                do {
                        int i;

                        err = HYPERVISOR_mmu_update(&mmu_update[index],
                                                    batch_left, &done, domid);

                        /*
                         * @err_ptr may be the same buffer as @gfn, so
                         * only clear it after each chunk of @gfn is
                         * used.
                         */
                        if (err_ptr) {
                                for (i = index; i < index + done; i++)
                                        err_ptr[i] = 0;
                        }
                        if (err < 0) {
                                if (!err_ptr)
                                        goto out;
                                err_ptr[i] = err;
                                done++; /* Skip failed frame. */
                        } else
                                mapped += done;
                        batch_left -= done;
                        index += done;
                } while (batch_left);

                nr -= batch;
                addr += range;
                if (err_ptr)
                        err_ptr += batch;
                cond_resched();
        }
out:
        xen_flush_tlb_all();

        return err < 0 ? err : mapped;
}
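
/*
 * Map a contiguous range of @nr foreign GFNs starting at @gfn from domain
 * @domid into @vma.  Not supported for auto-translated guests.
 */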
int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t gfn, int nr,
                               pgprot_t prot, unsigned domid,
                               struct page **pages)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;

        return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
                            pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
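
/*
 * Map an array of @nr foreign GFNs into @vma, reporting a per-frame status
 * through @err_ptr.  Auto-translated guests are handled by
 * xen_xlate_remap_gfn_array().
 */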
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *gfn, int nr,
                               int *err_ptr, pgprot_t prot,
                               unsigned domid, struct page **pages)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
                                                 prot, domid, pages);

        /*
         * We BUG_ON because it's a programmer error to pass a NULL err_ptr,
         * and the consequences later are quite hard to debug: it is not
         * obvious what actually caused the "wrong memory was mapped in".
         */
        BUG_ON(err_ptr == NULL);
        return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
                            false, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
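
/*
 * Like xen_remap_domain_gfn_array(), but the frames are raw MFNs that the
 * hypervisor must not translate (MMU_PT_UPDATE_NO_TRANSLATE).
 */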
int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *mfn, int nr,
                               int *err_ptr, pgprot_t prot,
                               unsigned int domid, struct page **pages)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;

        return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
                            true, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);

/* Returns: 0 on success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                               int nr, struct page **pages)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return xen_xlate_unmap_gfn_range(vma, nr, pages);

        if (!pages)
                return 0;

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);