mmu.c

  1. /*
  2. * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  3. * Author: Christoffer Dall <c.dall@virtualopensystems.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License, version 2, as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  17. */
  18. #include <linux/mman.h>
  19. #include <linux/kvm_host.h>
  20. #include <linux/io.h>
  21. #include <linux/hugetlb.h>
  22. #include <linux/sched/signal.h>
  23. #include <trace/events/kvm.h>
  24. #include <asm/pgalloc.h>
  25. #include <asm/cacheflush.h>
  26. #include <asm/kvm_arm.h>
  27. #include <asm/kvm_mmu.h>
  28. #include <asm/kvm_mmio.h>
  29. #include <asm/kvm_asm.h>
  30. #include <asm/kvm_emulate.h>
  31. #include <asm/virt.h>
  32. #include <asm/system_misc.h>
  33. #include "trace.h"
  34. static pgd_t *boot_hyp_pgd;
  35. static pgd_t *hyp_pgd;
  36. static pgd_t *merged_hyp_pgd;
  37. static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
  38. static unsigned long hyp_idmap_start;
  39. static unsigned long hyp_idmap_end;
  40. static phys_addr_t hyp_idmap_vector;
  41. static unsigned long io_map_base;
  42. #define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
  43. #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
  44. #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
  45. #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
  46. static bool memslot_is_logging(struct kvm_memory_slot *memslot)
  47. {
  48. return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
  49. }
  50. /**
  51. * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
  52. * @kvm: pointer to kvm structure.
  53. *
  54. * Interface to HYP function to flush all VM TLB entries
  55. */
  56. void kvm_flush_remote_tlbs(struct kvm *kvm)
  57. {
  58. kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
  59. }
  60. static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
  61. {
  62. kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
  63. }
  64. /*
  65. * D-Cache management functions. They take the page table entries by
  66. * value, as they are flushing the cache using the kernel mapping (or
  67. * kmap on 32bit).
  68. */
  69. static void kvm_flush_dcache_pte(pte_t pte)
  70. {
  71. __kvm_flush_dcache_pte(pte);
  72. }
  73. static void kvm_flush_dcache_pmd(pmd_t pmd)
  74. {
  75. __kvm_flush_dcache_pmd(pmd);
  76. }
  77. static void kvm_flush_dcache_pud(pud_t pud)
  78. {
  79. __kvm_flush_dcache_pud(pud);
  80. }
  81. static bool kvm_is_device_pfn(unsigned long pfn)
  82. {
  83. return !pfn_valid(pfn);
  84. }
  85. /**
  86. * stage2_dissolve_pmd() - clear and flush huge PMD entry
  87. * @kvm: pointer to kvm structure.
  88. * @addr: IPA
  89. * @pmd: pmd pointer for IPA
  90. *
  91. * Clears the PMD entry and flushes the 1st and 2nd stage TLBs for @addr. Marks all
  92. * pages in the range dirty.
  93. */
  94. static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
  95. {
  96. if (!pmd_thp_or_huge(*pmd))
  97. return;
  98. pmd_clear(pmd);
  99. kvm_tlb_flush_vmid_ipa(kvm, addr);
  100. put_page(virt_to_page(pmd));
  101. }
  102. static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
  103. int min, int max)
  104. {
  105. void *page;
  106. BUG_ON(max > KVM_NR_MEM_OBJS);
  107. if (cache->nobjs >= min)
  108. return 0;
  109. while (cache->nobjs < max) {
  110. page = (void *)__get_free_page(PGALLOC_GFP);
  111. if (!page)
  112. return -ENOMEM;
  113. cache->objects[cache->nobjs++] = page;
  114. }
  115. return 0;
  116. }
  117. static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
  118. {
  119. while (mc->nobjs)
  120. free_page((unsigned long)mc->objects[--mc->nobjs]);
  121. }
  122. static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
  123. {
  124. void *p;
  125. BUG_ON(!mc || !mc->nobjs);
  126. p = mc->objects[--mc->nobjs];
  127. return p;
  128. }
  129. static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
  130. {
  131. pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
  132. stage2_pgd_clear(pgd);
  133. kvm_tlb_flush_vmid_ipa(kvm, addr);
  134. stage2_pud_free(pud_table);
  135. put_page(virt_to_page(pgd));
  136. }
  137. static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
  138. {
  139. pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
  140. VM_BUG_ON(stage2_pud_huge(*pud));
  141. stage2_pud_clear(pud);
  142. kvm_tlb_flush_vmid_ipa(kvm, addr);
  143. stage2_pmd_free(pmd_table);
  144. put_page(virt_to_page(pud));
  145. }
  146. static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
  147. {
  148. pte_t *pte_table = pte_offset_kernel(pmd, 0);
  149. VM_BUG_ON(pmd_thp_or_huge(*pmd));
  150. pmd_clear(pmd);
  151. kvm_tlb_flush_vmid_ipa(kvm, addr);
  152. pte_free_kernel(NULL, pte_table);
  153. put_page(virt_to_page(pmd));
  154. }
  155. static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
  156. {
  157. WRITE_ONCE(*ptep, new_pte);
  158. dsb(ishst);
  159. }
  160. static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
  161. {
  162. WRITE_ONCE(*pmdp, new_pmd);
  163. dsb(ishst);
  164. }
  165. static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
  166. {
  167. kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
  168. }
  169. static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
  170. {
  171. WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
  172. dsb(ishst);
  173. }
  174. static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
  175. {
  176. WRITE_ONCE(*pgdp, kvm_mk_pgd(pudp));
  177. dsb(ishst);
  178. }
  179. /*
  180. * Unmapping vs dcache management:
  181. *
  182. * If a guest maps certain memory pages as uncached, all writes will
  183. * bypass the data cache and go directly to RAM. However, the CPUs
  184. * can still speculate reads (not writes) and fill cache lines with
  185. * data.
  186. *
  187. * Those cache lines will be *clean* cache lines though, so a
  188. * clean+invalidate operation is equivalent to an invalidate
  189. * operation, because no cache lines are marked dirty.
  190. *
  191. * Those clean cache lines could be filled prior to an uncached write
  192. * by the guest, and the cache coherent IO subsystem would therefore
  193. * end up writing old data to disk.
  194. *
  195. * This is why right after unmapping a page/section and invalidating
  196. * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
  197. * the IO subsystem will never hit in the cache.
  198. *
  199. * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
  200. * we then fully enforce cacheability of RAM, no matter what the guest
  201. * does.
  202. */
  203. static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
  204. phys_addr_t addr, phys_addr_t end)
  205. {
  206. phys_addr_t start_addr = addr;
  207. pte_t *pte, *start_pte;
  208. start_pte = pte = pte_offset_kernel(pmd, addr);
  209. do {
  210. if (!pte_none(*pte)) {
  211. pte_t old_pte = *pte;
  212. kvm_set_pte(pte, __pte(0));
  213. kvm_tlb_flush_vmid_ipa(kvm, addr);
  214. /* No need to invalidate the cache for device mappings */
  215. if (!kvm_is_device_pfn(pte_pfn(old_pte)))
  216. kvm_flush_dcache_pte(old_pte);
  217. put_page(virt_to_page(pte));
  218. }
  219. } while (pte++, addr += PAGE_SIZE, addr != end);
  220. if (stage2_pte_table_empty(start_pte))
  221. clear_stage2_pmd_entry(kvm, pmd, start_addr);
  222. }
  223. static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
  224. phys_addr_t addr, phys_addr_t end)
  225. {
  226. phys_addr_t next, start_addr = addr;
  227. pmd_t *pmd, *start_pmd;
  228. start_pmd = pmd = stage2_pmd_offset(pud, addr);
  229. do {
  230. next = stage2_pmd_addr_end(addr, end);
  231. if (!pmd_none(*pmd)) {
  232. if (pmd_thp_or_huge(*pmd)) {
  233. pmd_t old_pmd = *pmd;
  234. pmd_clear(pmd);
  235. kvm_tlb_flush_vmid_ipa(kvm, addr);
  236. kvm_flush_dcache_pmd(old_pmd);
  237. put_page(virt_to_page(pmd));
  238. } else {
  239. unmap_stage2_ptes(kvm, pmd, addr, next);
  240. }
  241. }
  242. } while (pmd++, addr = next, addr != end);
  243. if (stage2_pmd_table_empty(start_pmd))
  244. clear_stage2_pud_entry(kvm, pud, start_addr);
  245. }
  246. static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
  247. phys_addr_t addr, phys_addr_t end)
  248. {
  249. phys_addr_t next, start_addr = addr;
  250. pud_t *pud, *start_pud;
  251. start_pud = pud = stage2_pud_offset(pgd, addr);
  252. do {
  253. next = stage2_pud_addr_end(addr, end);
  254. if (!stage2_pud_none(*pud)) {
  255. if (stage2_pud_huge(*pud)) {
  256. pud_t old_pud = *pud;
  257. stage2_pud_clear(pud);
  258. kvm_tlb_flush_vmid_ipa(kvm, addr);
  259. kvm_flush_dcache_pud(old_pud);
  260. put_page(virt_to_page(pud));
  261. } else {
  262. unmap_stage2_pmds(kvm, pud, addr, next);
  263. }
  264. }
  265. } while (pud++, addr = next, addr != end);
  266. if (stage2_pud_table_empty(start_pud))
  267. clear_stage2_pgd_entry(kvm, pgd, start_addr);
  268. }
  269. /**
  270. * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
  271. * @kvm: The VM pointer
  272. * @start: The intermediate physical base address of the range to unmap
  273. * @size: The size of the area to unmap
  274. *
  275. * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
  276. * be called while holding mmu_lock (unless for freeing the stage2 pgd before
  277. * destroying the VM), otherwise another faulting VCPU may come in and mess
  278. * with things behind our backs.
  279. */
  280. static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
  281. {
  282. pgd_t *pgd;
  283. phys_addr_t addr = start, end = start + size;
  284. phys_addr_t next;
  285. assert_spin_locked(&kvm->mmu_lock);
  286. WARN_ON(size & ~PAGE_MASK);
  287. pgd = kvm->arch.pgd + stage2_pgd_index(addr);
  288. do {
  289. /*
  290. * Make sure the page table is still active, as another thread
  291. * could have possibly freed the page table, while we released
  292. * the lock.
  293. */
  294. if (!READ_ONCE(kvm->arch.pgd))
  295. break;
  296. next = stage2_pgd_addr_end(addr, end);
  297. if (!stage2_pgd_none(*pgd))
  298. unmap_stage2_puds(kvm, pgd, addr, next);
  299. /*
  300. * If the range is too large, release the kvm->mmu_lock
  301. * to prevent starvation and lockup detector warnings.
  302. */
  303. if (next != end)
  304. cond_resched_lock(&kvm->mmu_lock);
  305. } while (pgd++, addr = next, addr != end);
  306. }
  307. static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
  308. phys_addr_t addr, phys_addr_t end)
  309. {
  310. pte_t *pte;
  311. pte = pte_offset_kernel(pmd, addr);
  312. do {
  313. if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
  314. kvm_flush_dcache_pte(*pte);
  315. } while (pte++, addr += PAGE_SIZE, addr != end);
  316. }
  317. static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
  318. phys_addr_t addr, phys_addr_t end)
  319. {
  320. pmd_t *pmd;
  321. phys_addr_t next;
  322. pmd = stage2_pmd_offset(pud, addr);
  323. do {
  324. next = stage2_pmd_addr_end(addr, end);
  325. if (!pmd_none(*pmd)) {
  326. if (pmd_thp_or_huge(*pmd))
  327. kvm_flush_dcache_pmd(*pmd);
  328. else
  329. stage2_flush_ptes(kvm, pmd, addr, next);
  330. }
  331. } while (pmd++, addr = next, addr != end);
  332. }
  333. static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
  334. phys_addr_t addr, phys_addr_t end)
  335. {
  336. pud_t *pud;
  337. phys_addr_t next;
  338. pud = stage2_pud_offset(pgd, addr);
  339. do {
  340. next = stage2_pud_addr_end(addr, end);
  341. if (!stage2_pud_none(*pud)) {
  342. if (stage2_pud_huge(*pud))
  343. kvm_flush_dcache_pud(*pud);
  344. else
  345. stage2_flush_pmds(kvm, pud, addr, next);
  346. }
  347. } while (pud++, addr = next, addr != end);
  348. }
  349. static void stage2_flush_memslot(struct kvm *kvm,
  350. struct kvm_memory_slot *memslot)
  351. {
  352. phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
  353. phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
  354. phys_addr_t next;
  355. pgd_t *pgd;
  356. pgd = kvm->arch.pgd + stage2_pgd_index(addr);
  357. do {
  358. next = stage2_pgd_addr_end(addr, end);
  359. if (!stage2_pgd_none(*pgd))
  360. stage2_flush_puds(kvm, pgd, addr, next);
  361. } while (pgd++, addr = next, addr != end);
  362. }
  363. /**
  364. * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
  365. * @kvm: The struct kvm pointer
  366. *
  367. * Go through the stage 2 page tables and invalidate any cache lines
  368. * backing memory already mapped to the VM.
  369. */
  370. static void stage2_flush_vm(struct kvm *kvm)
  371. {
  372. struct kvm_memslots *slots;
  373. struct kvm_memory_slot *memslot;
  374. int idx;
  375. idx = srcu_read_lock(&kvm->srcu);
  376. spin_lock(&kvm->mmu_lock);
  377. slots = kvm_memslots(kvm);
  378. kvm_for_each_memslot(memslot, slots)
  379. stage2_flush_memslot(kvm, memslot);
  380. spin_unlock(&kvm->mmu_lock);
  381. srcu_read_unlock(&kvm->srcu, idx);
  382. }
  383. static void clear_hyp_pgd_entry(pgd_t *pgd)
  384. {
  385. pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
  386. pgd_clear(pgd);
  387. pud_free(NULL, pud_table);
  388. put_page(virt_to_page(pgd));
  389. }
  390. static void clear_hyp_pud_entry(pud_t *pud)
  391. {
  392. pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
  393. VM_BUG_ON(pud_huge(*pud));
  394. pud_clear(pud);
  395. pmd_free(NULL, pmd_table);
  396. put_page(virt_to_page(pud));
  397. }
  398. static void clear_hyp_pmd_entry(pmd_t *pmd)
  399. {
  400. pte_t *pte_table = pte_offset_kernel(pmd, 0);
  401. VM_BUG_ON(pmd_thp_or_huge(*pmd));
  402. pmd_clear(pmd);
  403. pte_free_kernel(NULL, pte_table);
  404. put_page(virt_to_page(pmd));
  405. }
  406. static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
  407. {
  408. pte_t *pte, *start_pte;
  409. start_pte = pte = pte_offset_kernel(pmd, addr);
  410. do {
  411. if (!pte_none(*pte)) {
  412. kvm_set_pte(pte, __pte(0));
  413. put_page(virt_to_page(pte));
  414. }
  415. } while (pte++, addr += PAGE_SIZE, addr != end);
  416. if (hyp_pte_table_empty(start_pte))
  417. clear_hyp_pmd_entry(pmd);
  418. }
  419. static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
  420. {
  421. phys_addr_t next;
  422. pmd_t *pmd, *start_pmd;
  423. start_pmd = pmd = pmd_offset(pud, addr);
  424. do {
  425. next = pmd_addr_end(addr, end);
  426. /* Hyp doesn't use huge pmds */
  427. if (!pmd_none(*pmd))
  428. unmap_hyp_ptes(pmd, addr, next);
  429. } while (pmd++, addr = next, addr != end);
  430. if (hyp_pmd_table_empty(start_pmd))
  431. clear_hyp_pud_entry(pud);
  432. }
  433. static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
  434. {
  435. phys_addr_t next;
  436. pud_t *pud, *start_pud;
  437. start_pud = pud = pud_offset(pgd, addr);
  438. do {
  439. next = pud_addr_end(addr, end);
  440. /* Hyp doesn't use huge puds */
  441. if (!pud_none(*pud))
  442. unmap_hyp_pmds(pud, addr, next);
  443. } while (pud++, addr = next, addr != end);
  444. if (hyp_pud_table_empty(start_pud))
  445. clear_hyp_pgd_entry(pgd);
  446. }
  447. static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
  448. {
  449. return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
  450. }
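/*
 * A worked example of the index computation above, assuming 4 KiB pages
 * and a 39-bit VA space (PGDIR_SHIFT == 30, ptrs_per_pgd == 512): a Hyp
 * VA of 0x4040000000 gives (0x4040000000 >> 30) == 0x101, and masking
 * with (ptrs_per_pgd - 1) == 511 selects PGD slot 0x101. The idmap
 * variants below pass __kvm_idmap_ptrs_per_pgd() instead of
 * PTRS_PER_PGD, so the same helper also covers the extended idmap case.
 */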
  451. static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
  452. phys_addr_t start, u64 size)
  453. {
  454. pgd_t *pgd;
  455. phys_addr_t addr = start, end = start + size;
  456. phys_addr_t next;
  457. /*
  458. * We don't unmap anything from HYP, except at the hyp tear down.
  459. * Hence, we don't have to invalidate the TLBs here.
  460. */
  461. pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
  462. do {
  463. next = pgd_addr_end(addr, end);
  464. if (!pgd_none(*pgd))
  465. unmap_hyp_puds(pgd, addr, next);
  466. } while (pgd++, addr = next, addr != end);
  467. }
  468. static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
  469. {
  470. __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
  471. }
  472. static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
  473. {
  474. __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
  475. }
  476. /**
  477. * free_hyp_pgds - free Hyp-mode page tables
  478. *
  479. * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
  480. * therefore contains either mappings in the kernel memory area (above
  481. * PAGE_OFFSET), or device mappings in the idmap range.
  482. *
  483. * boot_hyp_pgd should only map the idmap range, and is only used in
  484. * the extended idmap case.
  485. */
  486. void free_hyp_pgds(void)
  487. {
  488. pgd_t *id_pgd;
  489. mutex_lock(&kvm_hyp_pgd_mutex);
  490. id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
  491. if (id_pgd) {
  492. /* In case we never called hyp_mmu_init() */
  493. if (!io_map_base)
  494. io_map_base = hyp_idmap_start;
  495. unmap_hyp_idmap_range(id_pgd, io_map_base,
  496. hyp_idmap_start + PAGE_SIZE - io_map_base);
  497. }
  498. if (boot_hyp_pgd) {
  499. free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
  500. boot_hyp_pgd = NULL;
  501. }
  502. if (hyp_pgd) {
  503. unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
  504. (uintptr_t)high_memory - PAGE_OFFSET);
  505. free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
  506. hyp_pgd = NULL;
  507. }
  508. if (merged_hyp_pgd) {
  509. clear_page(merged_hyp_pgd);
  510. free_page((unsigned long)merged_hyp_pgd);
  511. merged_hyp_pgd = NULL;
  512. }
  513. mutex_unlock(&kvm_hyp_pgd_mutex);
  514. }
  515. static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
  516. unsigned long end, unsigned long pfn,
  517. pgprot_t prot)
  518. {
  519. pte_t *pte;
  520. unsigned long addr;
  521. addr = start;
  522. do {
  523. pte = pte_offset_kernel(pmd, addr);
  524. kvm_set_pte(pte, pfn_pte(pfn, prot));
  525. get_page(virt_to_page(pte));
  526. pfn++;
  527. } while (addr += PAGE_SIZE, addr != end);
  528. }
  529. static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
  530. unsigned long end, unsigned long pfn,
  531. pgprot_t prot)
  532. {
  533. pmd_t *pmd;
  534. pte_t *pte;
  535. unsigned long addr, next;
  536. addr = start;
  537. do {
  538. pmd = pmd_offset(pud, addr);
  539. BUG_ON(pmd_sect(*pmd));
  540. if (pmd_none(*pmd)) {
  541. pte = pte_alloc_one_kernel(NULL, addr);
  542. if (!pte) {
  543. kvm_err("Cannot allocate Hyp pte\n");
  544. return -ENOMEM;
  545. }
  546. kvm_pmd_populate(pmd, pte);
  547. get_page(virt_to_page(pmd));
  548. }
  549. next = pmd_addr_end(addr, end);
  550. create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
  551. pfn += (next - addr) >> PAGE_SHIFT;
  552. } while (addr = next, addr != end);
  553. return 0;
  554. }
  555. static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
  556. unsigned long end, unsigned long pfn,
  557. pgprot_t prot)
  558. {
  559. pud_t *pud;
  560. pmd_t *pmd;
  561. unsigned long addr, next;
  562. int ret;
  563. addr = start;
  564. do {
  565. pud = pud_offset(pgd, addr);
  566. if (pud_none_or_clear_bad(pud)) {
  567. pmd = pmd_alloc_one(NULL, addr);
  568. if (!pmd) {
  569. kvm_err("Cannot allocate Hyp pmd\n");
  570. return -ENOMEM;
  571. }
  572. kvm_pud_populate(pud, pmd);
  573. get_page(virt_to_page(pud));
  574. }
  575. next = pud_addr_end(addr, end);
  576. ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
  577. if (ret)
  578. return ret;
  579. pfn += (next - addr) >> PAGE_SHIFT;
  580. } while (addr = next, addr != end);
  581. return 0;
  582. }
  583. static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
  584. unsigned long start, unsigned long end,
  585. unsigned long pfn, pgprot_t prot)
  586. {
  587. pgd_t *pgd;
  588. pud_t *pud;
  589. unsigned long addr, next;
  590. int err = 0;
  591. mutex_lock(&kvm_hyp_pgd_mutex);
  592. addr = start & PAGE_MASK;
  593. end = PAGE_ALIGN(end);
  594. do {
  595. pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
  596. if (pgd_none(*pgd)) {
  597. pud = pud_alloc_one(NULL, addr);
  598. if (!pud) {
  599. kvm_err("Cannot allocate Hyp pud\n");
  600. err = -ENOMEM;
  601. goto out;
  602. }
  603. kvm_pgd_populate(pgd, pud);
  604. get_page(virt_to_page(pgd));
  605. }
  606. next = pgd_addr_end(addr, end);
  607. err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
  608. if (err)
  609. goto out;
  610. pfn += (next - addr) >> PAGE_SHIFT;
  611. } while (addr = next, addr != end);
  612. out:
  613. mutex_unlock(&kvm_hyp_pgd_mutex);
  614. return err;
  615. }
  616. static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
  617. {
  618. if (!is_vmalloc_addr(kaddr)) {
  619. BUG_ON(!virt_addr_valid(kaddr));
  620. return __pa(kaddr);
  621. } else {
  622. return page_to_phys(vmalloc_to_page(kaddr)) +
  623. offset_in_page(kaddr);
  624. }
  625. }
  626. /**
  627. * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
  628. * @from: The virtual kernel start address of the range
  629. * @to: The virtual kernel end address of the range (exclusive)
  630. * @prot: The protection to be applied to this range
  631. *
  632. * The same virtual address as the kernel virtual address is also used
  633. * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
  634. * physical pages.
  635. */
  636. int create_hyp_mappings(void *from, void *to, pgprot_t prot)
  637. {
  638. phys_addr_t phys_addr;
  639. unsigned long virt_addr;
  640. unsigned long start = kern_hyp_va((unsigned long)from);
  641. unsigned long end = kern_hyp_va((unsigned long)to);
  642. if (is_kernel_in_hyp_mode())
  643. return 0;
  644. start = start & PAGE_MASK;
  645. end = PAGE_ALIGN(end);
  646. for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
  647. int err;
  648. phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
  649. err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
  650. virt_addr, virt_addr + PAGE_SIZE,
  651. __phys_to_pfn(phys_addr),
  652. prot);
  653. if (err)
  654. return err;
  655. }
  656. return 0;
  657. }
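/*
 * A hedged usage sketch (the call site lives outside this file; the call
 * shape is assumed from the arch init code rather than quoted from it):
 * mapping the Hyp text into EL2 with executable permissions would look
 * roughly like
 *
 *	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
 *				  kvm_ksym_ref(__hyp_text_end),
 *				  PAGE_HYP_EXEC);
 *
 * The kernel VA range is translated with kern_hyp_va() above, so the Hyp
 * mapping mirrors the kernel one modulo the VA offset.
 */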
  658. static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
  659. unsigned long *haddr, pgprot_t prot)
  660. {
  661. pgd_t *pgd = hyp_pgd;
  662. unsigned long base;
  663. int ret = 0;
  664. mutex_lock(&kvm_hyp_pgd_mutex);
  665. /*
  666. * This assumes that we we have enough space below the idmap
  667. * page to allocate our VAs. If not, the check below will
  668. * kick. A potential alternative would be to detect that
  669. * overflow and switch to an allocation above the idmap.
  670. *
  671. * The allocated size is always a multiple of PAGE_SIZE.
  672. */
  673. size = PAGE_ALIGN(size + offset_in_page(phys_addr));
  674. base = io_map_base - size;
  675. /*
  676. * Verify that BIT(VA_BITS - 1) hasn't been flipped by
  677. * allocating the new area, as it would indicate we've
  678. * overflowed the idmap/IO address range.
  679. */
  680. if ((base ^ io_map_base) & BIT(VA_BITS - 1))
  681. ret = -ENOMEM;
  682. else
  683. io_map_base = base;
  684. mutex_unlock(&kvm_hyp_pgd_mutex);
  685. if (ret)
  686. goto out;
  687. if (__kvm_cpu_uses_extended_idmap())
  688. pgd = boot_hyp_pgd;
  689. ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
  690. base, base + size,
  691. __phys_to_pfn(phys_addr), prot);
  692. if (ret)
  693. goto out;
  694. *haddr = base + offset_in_page(phys_addr);
  695. out:
  696. return ret;
  697. }
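/*
 * Illustration of the allocator above, with assumed numbers: private Hyp
 * VAs are handed out downwards from io_map_base. Suppose VA_BITS == 39
 * and io_map_base sits just above BIT(38); a 16 KiB request computes
 * base = io_map_base - 0x4000. If that subtraction drops base below
 * BIT(38), bit 38 now differs between base and io_map_base, the
 * (base ^ io_map_base) & BIT(VA_BITS - 1) check fires, and -ENOMEM is
 * returned rather than silently escaping the idmap/IO VA range.
 */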
  698. /**
  699. * create_hyp_io_mappings - Map IO into both kernel and HYP
  700. * @phys_addr: The physical start address which gets mapped
  701. * @size: Size of the region being mapped
  702. * @kaddr: Kernel VA for this mapping
  703. * @haddr: HYP VA for this mapping
  704. */
  705. int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
  706. void __iomem **kaddr,
  707. void __iomem **haddr)
  708. {
  709. unsigned long addr;
  710. int ret;
  711. *kaddr = ioremap(phys_addr, size);
  712. if (!*kaddr)
  713. return -ENOMEM;
  714. if (is_kernel_in_hyp_mode()) {
  715. *haddr = *kaddr;
  716. return 0;
  717. }
  718. ret = __create_hyp_private_mapping(phys_addr, size,
  719. &addr, PAGE_HYP_DEVICE);
  720. if (ret) {
  721. iounmap(*kaddr);
  722. *kaddr = NULL;
  723. *haddr = NULL;
  724. return ret;
  725. }
  726. *haddr = (void __iomem *)addr;
  727. return 0;
  728. }
  729. /**
  730. * create_hyp_exec_mappings - Map an executable range into HYP
  731. * @phys_addr: The physical start address which gets mapped
  732. * @size: Size of the region being mapped
  733. * @haddr: HYP VA for this mapping
  734. */
  735. int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
  736. void **haddr)
  737. {
  738. unsigned long addr;
  739. int ret;
  740. BUG_ON(is_kernel_in_hyp_mode());
  741. ret = __create_hyp_private_mapping(phys_addr, size,
  742. &addr, PAGE_HYP_EXEC);
  743. if (ret) {
  744. *haddr = NULL;
  745. return ret;
  746. }
  747. *haddr = (void *)addr;
  748. return 0;
  749. }
  750. /**
  751. * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  752. * @kvm: The KVM struct pointer for the VM.
  753. *
  754. * Allocates only the stage-2 HW PGD level table(s), supporting either the full
  755. * 40-bit input address range or one limited to 32 bits. Clears the
  756. * allocated pages.
  757. *
  758. * Note we don't need locking here as this is only called when the VM is
  759. * created, which can only be done once.
  760. */
  761. int kvm_alloc_stage2_pgd(struct kvm *kvm)
  762. {
  763. pgd_t *pgd;
  764. if (kvm->arch.pgd != NULL) {
  765. kvm_err("kvm_arch already initialized?\n");
  766. return -EINVAL;
  767. }
  768. /* Allocate the HW PGD, making sure that each page gets its own refcount */
  769. pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
  770. if (!pgd)
  771. return -ENOMEM;
  772. kvm->arch.pgd = pgd;
  773. return 0;
  774. }
  775. static void stage2_unmap_memslot(struct kvm *kvm,
  776. struct kvm_memory_slot *memslot)
  777. {
  778. hva_t hva = memslot->userspace_addr;
  779. phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
  780. phys_addr_t size = PAGE_SIZE * memslot->npages;
  781. hva_t reg_end = hva + size;
  782. /*
  783. * A memory region could potentially cover multiple VMAs, and any holes
  784. * between them, so iterate over all of them to find out if we should
  785. * unmap any of them.
  786. *
  787. * +--------------------------------------------+
  788. * +---------------+----------------+ +----------------+
  789. * | : VMA 1 | VMA 2 | | VMA 3 : |
  790. * +---------------+----------------+ +----------------+
  791. * | memory region |
  792. * +--------------------------------------------+
  793. */
  794. do {
  795. struct vm_area_struct *vma = find_vma(current->mm, hva);
  796. hva_t vm_start, vm_end;
  797. if (!vma || vma->vm_start >= reg_end)
  798. break;
  799. /*
  800. * Take the intersection of this VMA with the memory region
  801. */
  802. vm_start = max(hva, vma->vm_start);
  803. vm_end = min(reg_end, vma->vm_end);
  804. if (!(vma->vm_flags & VM_PFNMAP)) {
  805. gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
  806. unmap_stage2_range(kvm, gpa, vm_end - vm_start);
  807. }
  808. hva = vm_end;
  809. } while (hva < reg_end);
  810. }
  811. /**
  812. * stage2_unmap_vm - Unmap Stage-2 RAM mappings
  813. * @kvm: The struct kvm pointer
  814. *
  815. * Go through the memregions and unmap any regular RAM
  816. * backing memory already mapped to the VM.
  817. */
  818. void stage2_unmap_vm(struct kvm *kvm)
  819. {
  820. struct kvm_memslots *slots;
  821. struct kvm_memory_slot *memslot;
  822. int idx;
  823. idx = srcu_read_lock(&kvm->srcu);
  824. down_read(&current->mm->mmap_sem);
  825. spin_lock(&kvm->mmu_lock);
  826. slots = kvm_memslots(kvm);
  827. kvm_for_each_memslot(memslot, slots)
  828. stage2_unmap_memslot(kvm, memslot);
  829. spin_unlock(&kvm->mmu_lock);
  830. up_read(&current->mm->mmap_sem);
  831. srcu_read_unlock(&kvm->srcu, idx);
  832. }
  833. /**
  834. * kvm_free_stage2_pgd - free all stage-2 tables
  835. * @kvm: The KVM struct pointer for the VM.
  836. *
  837. * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
  838. * underlying level-2 and level-3 tables before freeing the actual level-1 table
  839. * and setting the struct pointer to NULL.
  840. */
  841. void kvm_free_stage2_pgd(struct kvm *kvm)
  842. {
  843. void *pgd = NULL;
  844. spin_lock(&kvm->mmu_lock);
  845. if (kvm->arch.pgd) {
  846. unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
  847. pgd = READ_ONCE(kvm->arch.pgd);
  848. kvm->arch.pgd = NULL;
  849. }
  850. spin_unlock(&kvm->mmu_lock);
  851. /* Free the HW pgd, one page at a time */
  852. if (pgd)
  853. free_pages_exact(pgd, S2_PGD_SIZE);
  854. }
  855. static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
  856. phys_addr_t addr)
  857. {
  858. pgd_t *pgd;
  859. pud_t *pud;
  860. pgd = kvm->arch.pgd + stage2_pgd_index(addr);
  861. if (WARN_ON(stage2_pgd_none(*pgd))) {
  862. if (!cache)
  863. return NULL;
  864. pud = mmu_memory_cache_alloc(cache);
  865. stage2_pgd_populate(pgd, pud);
  866. get_page(virt_to_page(pgd));
  867. }
  868. return stage2_pud_offset(pgd, addr);
  869. }
  870. static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
  871. phys_addr_t addr)
  872. {
  873. pud_t *pud;
  874. pmd_t *pmd;
  875. pud = stage2_get_pud(kvm, cache, addr);
  876. if (!pud)
  877. return NULL;
  878. if (stage2_pud_none(*pud)) {
  879. if (!cache)
  880. return NULL;
  881. pmd = mmu_memory_cache_alloc(cache);
  882. stage2_pud_populate(pud, pmd);
  883. get_page(virt_to_page(pud));
  884. }
  885. return stage2_pmd_offset(pud, addr);
  886. }
  887. static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
  888. *cache, phys_addr_t addr, const pmd_t *new_pmd)
  889. {
  890. pmd_t *pmd, old_pmd;
  891. pmd = stage2_get_pmd(kvm, cache, addr);
  892. VM_BUG_ON(!pmd);
  893. old_pmd = *pmd;
  894. if (pmd_present(old_pmd)) {
  895. /*
  896. * Multiple vcpus faulting on the same PMD entry can
  897. * lead to them sequentially updating the PMD with the
  898. * same value. Following the break-before-make
  899. * (pmd_clear() followed by tlb_flush()) process can
  900. * hinder forward progress due to refaults generated
  901. * on missing translations.
  902. *
  903. * Skip updating the page table if the entry is
  904. * unchanged.
  905. */
  906. if (pmd_val(old_pmd) == pmd_val(*new_pmd))
  907. return 0;
  908. /*
  909. * Mapping in huge pages should only happen through a
  910. * fault. If a page is merged into a transparent huge
  911. * page, the individual subpages of that huge page
  912. * should be unmapped through MMU notifiers before we
  913. * get here.
  914. *
  915. * Merging of CompoundPages is not supported; they
  916. * should be split first, unmapped, merged,
  917. * and mapped back in on-demand.
  918. */
  919. VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
  920. pmd_clear(pmd);
  921. kvm_tlb_flush_vmid_ipa(kvm, addr);
  922. } else {
  923. get_page(virt_to_page(pmd));
  924. }
  925. kvm_set_pmd(pmd, *new_pmd);
  926. return 0;
  927. }
  928. static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
  929. {
  930. pmd_t *pmdp;
  931. pte_t *ptep;
  932. pmdp = stage2_get_pmd(kvm, NULL, addr);
  933. if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
  934. return false;
  935. if (pmd_thp_or_huge(*pmdp))
  936. return kvm_s2pmd_exec(pmdp);
  937. ptep = pte_offset_kernel(pmdp, addr);
  938. if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
  939. return false;
  940. return kvm_s2pte_exec(ptep);
  941. }
  942. static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
  943. phys_addr_t addr, const pte_t *new_pte,
  944. unsigned long flags)
  945. {
  946. pmd_t *pmd;
  947. pte_t *pte, old_pte;
  948. bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
  949. bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
  950. VM_BUG_ON(logging_active && !cache);
  951. /* Create stage-2 page table mapping - Levels 0 and 1 */
  952. pmd = stage2_get_pmd(kvm, cache, addr);
  953. if (!pmd) {
  954. /*
  955. * Ignore calls from kvm_set_spte_hva for unallocated
  956. * address ranges.
  957. */
  958. return 0;
  959. }
  960. /*
  961. * While dirty page logging - dissolve huge PMD, then continue on to
  962. * allocate page.
  963. */
  964. if (logging_active)
  965. stage2_dissolve_pmd(kvm, addr, pmd);
  966. /* Create stage-2 page mappings - Level 2 */
  967. if (pmd_none(*pmd)) {
  968. if (!cache)
  969. return 0; /* ignore calls from kvm_set_spte_hva */
  970. pte = mmu_memory_cache_alloc(cache);
  971. kvm_pmd_populate(pmd, pte);
  972. get_page(virt_to_page(pmd));
  973. }
  974. pte = pte_offset_kernel(pmd, addr);
  975. if (iomap && pte_present(*pte))
  976. return -EFAULT;
  977. /* Create 2nd stage page table mapping - Level 3 */
  978. old_pte = *pte;
  979. if (pte_present(old_pte)) {
  980. /* Skip page table update if there is no change */
  981. if (pte_val(old_pte) == pte_val(*new_pte))
  982. return 0;
  983. kvm_set_pte(pte, __pte(0));
  984. kvm_tlb_flush_vmid_ipa(kvm, addr);
  985. } else {
  986. get_page(virt_to_page(pte));
  987. }
  988. kvm_set_pte(pte, *new_pte);
  989. return 0;
  990. }
  991. #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
  992. static int stage2_ptep_test_and_clear_young(pte_t *pte)
  993. {
  994. if (pte_young(*pte)) {
  995. *pte = pte_mkold(*pte);
  996. return 1;
  997. }
  998. return 0;
  999. }
  1000. #else
  1001. static int stage2_ptep_test_and_clear_young(pte_t *pte)
  1002. {
  1003. return __ptep_test_and_clear_young(pte);
  1004. }
  1005. #endif
  1006. static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
  1007. {
  1008. return stage2_ptep_test_and_clear_young((pte_t *)pmd);
  1009. }
  1010. /**
  1011. * kvm_phys_addr_ioremap - map a device range to guest IPA
  1012. *
  1013. * @kvm: The KVM pointer
  1014. * @guest_ipa: The IPA at which to insert the mapping
  1015. * @pa: The physical address of the device
  1016. * @size: The size of the mapping
  1017. */
  1018. int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
  1019. phys_addr_t pa, unsigned long size, bool writable)
  1020. {
  1021. phys_addr_t addr, end;
  1022. int ret = 0;
  1023. unsigned long pfn;
  1024. struct kvm_mmu_memory_cache cache = { 0, };
  1025. end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
  1026. pfn = __phys_to_pfn(pa);
  1027. for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
  1028. pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
  1029. if (writable)
  1030. pte = kvm_s2pte_mkwrite(pte);
  1031. ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
  1032. KVM_NR_MEM_OBJS);
  1033. if (ret)
  1034. goto out;
  1035. spin_lock(&kvm->mmu_lock);
  1036. ret = stage2_set_pte(kvm, &cache, addr, &pte,
  1037. KVM_S2PTE_FLAG_IS_IOMAP);
  1038. spin_unlock(&kvm->mmu_lock);
  1039. if (ret)
  1040. goto out;
  1041. pfn++;
  1042. }
  1043. out:
  1044. mmu_free_memory_cache(&cache);
  1045. return ret;
  1046. }
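/*
 * A hedged example of how this is typically used (caller and constants
 * illustrative, not taken from this file): the GICv2 emulation maps the
 * host's GIC virtual CPU interface into the guest at the IPA where the
 * guest expects its GICC registers, as a writable device mapping:
 *
 *	ret = kvm_phys_addr_ioremap(kvm, guest_gicc_ipa, host_gicv_pa,
 *				    SZ_8K, true);
 *
 * Each page is then installed by the loop above with PAGE_S2_DEVICE
 * attributes and, because writable is true, write permission.
 */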
  1047. static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
  1048. {
  1049. kvm_pfn_t pfn = *pfnp;
  1050. gfn_t gfn = *ipap >> PAGE_SHIFT;
  1051. struct page *page = pfn_to_page(pfn);
  1052. /*
  1053. * PageTransCompoundMap() returns true for THP and
  1054. * hugetlbfs. Make sure the adjustment is done only for THP
  1055. * pages.
  1056. */
  1057. if (!PageHuge(page) && PageTransCompoundMap(page)) {
  1058. unsigned long mask;
  1059. /*
  1060. * The address we faulted on is backed by a transparent huge
  1061. * page. However, because we map the compound huge page and
  1062. * not the individual tail page, we need to transfer the
  1063. * refcount to the head page. We have to be careful that the
  1064. * THP doesn't start to split while we are adjusting the
  1065. * refcounts.
  1066. *
  1067. * We are sure this doesn't happen, because mmu_notifier_retry
  1068. * was successful and we are holding the mmu_lock, so if this
  1069. * THP is trying to split, it will be blocked in the mmu
  1070. * notifier before touching any of the pages, specifically
  1071. * before being able to call __split_huge_page_refcount().
  1072. *
  1073. * We can therefore safely transfer the refcount from PG_tail
  1074. * to PG_head and switch the pfn from a tail page to the head
  1075. * page accordingly.
  1076. */
  1077. mask = PTRS_PER_PMD - 1;
  1078. VM_BUG_ON((gfn & mask) != (pfn & mask));
  1079. if (pfn & mask) {
  1080. *ipap &= PMD_MASK;
  1081. kvm_release_pfn_clean(pfn);
  1082. pfn &= ~mask;
  1083. kvm_get_pfn(pfn);
  1084. *pfnp = pfn;
  1085. }
  1086. return true;
  1087. }
  1088. return false;
  1089. }
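/*
 * Worked example with hypothetical numbers, assuming 4 KiB pages
 * (PTRS_PER_PMD - 1 == 511): if the fault hit tail pfn 0x89603 for
 * gfn 0x10203, both share the low bits 0x003, so the VM_BUG_ON holds.
 * Since (pfn & mask) is non-zero, *ipap is rounded down to the 2 MiB
 * boundary (gfn 0x10200), the tail-page reference is dropped, a
 * reference is taken on head pfn 0x89600, and the caller goes on to
 * install a single block mapping via stage2_set_pmd_huge().
 */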
  1090. static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
  1091. {
  1092. if (kvm_vcpu_trap_is_iabt(vcpu))
  1093. return false;
  1094. return kvm_vcpu_dabt_iswrite(vcpu);
  1095. }
  1096. /**
  1097. * stage2_wp_ptes - write protect PMD range
  1098. * @pmd: pointer to pmd entry
  1099. * @addr: range start address
  1100. * @end: range end address
  1101. */
  1102. static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
  1103. {
  1104. pte_t *pte;
  1105. pte = pte_offset_kernel(pmd, addr);
  1106. do {
  1107. if (!pte_none(*pte)) {
  1108. if (!kvm_s2pte_readonly(pte))
  1109. kvm_set_s2pte_readonly(pte);
  1110. }
  1111. } while (pte++, addr += PAGE_SIZE, addr != end);
  1112. }
  1113. /**
  1114. * stage2_wp_pmds - write protect PUD range
  1115. * @pud: pointer to pud entry
  1116. * @addr: range start address
  1117. * @end: range end address
  1118. */
  1119. static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
  1120. {
  1121. pmd_t *pmd;
  1122. phys_addr_t next;
  1123. pmd = stage2_pmd_offset(pud, addr);
  1124. do {
  1125. next = stage2_pmd_addr_end(addr, end);
  1126. if (!pmd_none(*pmd)) {
  1127. if (pmd_thp_or_huge(*pmd)) {
  1128. if (!kvm_s2pmd_readonly(pmd))
  1129. kvm_set_s2pmd_readonly(pmd);
  1130. } else {
  1131. stage2_wp_ptes(pmd, addr, next);
  1132. }
  1133. }
  1134. } while (pmd++, addr = next, addr != end);
  1135. }
  1136. /**
  1137. * stage2_wp_puds - write protect PGD range
  1138. * @pgd: pointer to pgd entry
  1139. * @addr: range start address
  1140. * @end: range end address
  1141. *
  1142. * Process PUD entries; huge PUDs are not supported and trigger a BUG.
  1143. */
  1144. static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
  1145. {
  1146. pud_t *pud;
  1147. phys_addr_t next;
  1148. pud = stage2_pud_offset(pgd, addr);
  1149. do {
  1150. next = stage2_pud_addr_end(addr, end);
  1151. if (!stage2_pud_none(*pud)) {
  1152. /* TODO:PUD not supported, revisit later if supported */
  1153. BUG_ON(stage2_pud_huge(*pud));
  1154. stage2_wp_pmds(pud, addr, next);
  1155. }
  1156. } while (pud++, addr = next, addr != end);
  1157. }
  1158. /**
  1159. * stage2_wp_range() - write protect stage2 memory region range
  1160. * @kvm: The KVM pointer
  1161. * @addr: Start address of range
  1162. * @end: End address of range
  1163. */
  1164. static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
  1165. {
  1166. pgd_t *pgd;
  1167. phys_addr_t next;
  1168. pgd = kvm->arch.pgd + stage2_pgd_index(addr);
  1169. do {
  1170. /*
  1171. * Release kvm_mmu_lock periodically if the memory region is
  1172. * large. Otherwise, we may see kernel panics with
  1173. * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
  1174. * CONFIG_LOCKDEP. Additionally, holding the lock too long
  1175. * will also starve other vCPUs. We also have to make sure
  1176. * that the page tables are not freed while the lock is
  1177. * released.
  1178. */
  1179. cond_resched_lock(&kvm->mmu_lock);
  1180. if (!READ_ONCE(kvm->arch.pgd))
  1181. break;
  1182. next = stage2_pgd_addr_end(addr, end);
  1183. if (stage2_pgd_present(*pgd))
  1184. stage2_wp_puds(pgd, addr, next);
  1185. } while (pgd++, addr = next, addr != end);
  1186. }
  1187. /**
  1188. * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
  1189. * @kvm: The KVM pointer
  1190. * @slot: The memory slot to write protect
  1191. *
  1192. * Called to start logging dirty pages after memory region
  1193. * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
  1194. * all present PMDs and PTEs in the memory region are write protected.
  1195. * Afterwards, the dirty page log can be read.
  1196. *
  1197. * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
  1198. * serializing operations for VM memory regions.
  1199. */
  1200. void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
  1201. {
  1202. struct kvm_memslots *slots = kvm_memslots(kvm);
  1203. struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
  1204. phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
  1205. phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
  1206. spin_lock(&kvm->mmu_lock);
  1207. stage2_wp_range(kvm, start, end);
  1208. spin_unlock(&kvm->mmu_lock);
  1209. kvm_flush_remote_tlbs(kvm);
  1210. }
  1211. /**
  1212. * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
  1213. * @kvm: The KVM pointer
  1214. * @slot: The memory slot associated with mask
  1215. * @gfn_offset: The gfn offset in memory slot
  1216. * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
  1217. * slot to be write protected
  1218. *
  1219. * Walks the bits set in @mask and write protects the associated PTEs. The
  1220. * caller must hold kvm_mmu_lock.
  1221. */
  1222. static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
  1223. struct kvm_memory_slot *slot,
  1224. gfn_t gfn_offset, unsigned long mask)
  1225. {
  1226. phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
  1227. phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
  1228. phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
  1229. stage2_wp_range(kvm, start, end);
  1230. }
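/*
 * Worked example with hypothetical values: for mask == 0x06 (bits 1 and
 * 2 set), __ffs(mask) == 1 and __fls(mask) == 2, so the range spans
 * gfns base_gfn + 1 and base_gfn + 2. Any clear bits falling inside the
 * span are write protected as well, which should be harmless: a write to
 * such a page simply faults and gets marked dirty as usual.
 */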
  1231. /*
  1232. * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
  1233. * dirty pages.
  1234. *
  1235. * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
  1236. * enable dirty logging for them.
  1237. */
  1238. void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
  1239. struct kvm_memory_slot *slot,
  1240. gfn_t gfn_offset, unsigned long mask)
  1241. {
  1242. kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
  1243. }
  1244. static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
  1245. {
  1246. __clean_dcache_guest_page(pfn, size);
  1247. }
  1248. static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
  1249. {
  1250. __invalidate_icache_guest_page(pfn, size);
  1251. }
  1252. static void kvm_send_hwpoison_signal(unsigned long address,
  1253. struct vm_area_struct *vma)
  1254. {
  1255. siginfo_t info;
  1256. clear_siginfo(&info);
  1257. info.si_signo = SIGBUS;
  1258. info.si_errno = 0;
  1259. info.si_code = BUS_MCEERR_AR;
  1260. info.si_addr = (void __user *)address;
  1261. if (is_vm_hugetlb_page(vma))
  1262. info.si_addr_lsb = huge_page_shift(hstate_vma(vma));
  1263. else
  1264. info.si_addr_lsb = PAGE_SHIFT;
  1265. send_sig_info(SIGBUS, &info, current);
  1266. }
  1267. static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
  1268. struct kvm_memory_slot *memslot, unsigned long hva,
  1269. unsigned long fault_status)
  1270. {
  1271. int ret;
  1272. bool write_fault, exec_fault, writable, hugetlb = false, force_pte = false;
  1273. unsigned long mmu_seq;
  1274. gfn_t gfn = fault_ipa >> PAGE_SHIFT;
  1275. struct kvm *kvm = vcpu->kvm;
  1276. struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
  1277. struct vm_area_struct *vma;
  1278. kvm_pfn_t pfn;
  1279. pgprot_t mem_type = PAGE_S2;
  1280. bool logging_active = memslot_is_logging(memslot);
  1281. unsigned long flags = 0;
  1282. write_fault = kvm_is_write_fault(vcpu);
  1283. exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
  1284. VM_BUG_ON(write_fault && exec_fault);
  1285. if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
  1286. kvm_err("Unexpected L2 read permission error\n");
  1287. return -EFAULT;
  1288. }
  1289. /* Let's check if we will get back a huge page backed by hugetlbfs */
  1290. down_read(&current->mm->mmap_sem);
  1291. vma = find_vma_intersection(current->mm, hva, hva + 1);
  1292. if (unlikely(!vma)) {
  1293. kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
  1294. up_read(&current->mm->mmap_sem);
  1295. return -EFAULT;
  1296. }
  1297. if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
  1298. hugetlb = true;
  1299. gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
  1300. } else {
  1301. /*
  1302. * Pages belonging to memslots that don't have the same
  1303. * alignment for userspace and IPA cannot be mapped using
  1304. * block descriptors even if the pages belong to a THP for
  1305. * the process, because the stage-2 block descriptor will
  1306. * cover more than a single THP and we lose atomicity for
  1307. * unmapping, updates, and splits of the THP or other pages
  1308. * in the stage-2 block range.
  1309. */
  1310. if ((memslot->userspace_addr & ~PMD_MASK) !=
  1311. ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
  1312. force_pte = true;
  1313. }
  1314. up_read(&current->mm->mmap_sem);
  1315. /* We need minimum second+third level pages */
  1316. ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
  1317. KVM_NR_MEM_OBJS);
  1318. if (ret)
  1319. return ret;
  1320. mmu_seq = vcpu->kvm->mmu_notifier_seq;
  1321. /*
  1322. * Ensure the read of mmu_notifier_seq happens before we call
  1323. * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
  1324. * the page we just got a reference to gets unmapped before we have a
  1325. * chance to grab the mmu_lock, which ensure that if the page gets
  1326. * unmapped afterwards, the call to kvm_unmap_hva will take it away
  1327. * from us again properly. This smp_rmb() interacts with the smp_wmb()
  1328. * in kvm_mmu_notifier_invalidate_<page|range_end>.
  1329. */
  1330. smp_rmb();
	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(hva, vma);
		return 0;
	}
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	if (kvm_is_device_pfn(pfn)) {
		mem_type = PAGE_S2_DEVICE;
		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
	} else if (logging_active) {
		/*
		 * Faults on pages in a memslot with logging enabled
		 * should not be mapped with huge pages (it introduces churn
		 * and performance degradation), so force a pte mapping.
		 */
		force_pte = true;
		flags |= KVM_S2_FLAG_LOGGING_ACTIVE;

		/*
		 * Only actually map the page as writable if this was a write
		 * fault.
		 */
		if (!write_fault)
			writable = false;
	}

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	if (!hugetlb && !force_pte)
		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);

	if (hugetlb) {
		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
		new_pmd = pmd_mkhuge(new_pmd);
		if (writable) {
			new_pmd = kvm_s2pmd_mkwrite(new_pmd);
			kvm_set_pfn_dirty(pfn);
		}

		if (fault_status != FSC_PERM)
			clean_dcache_guest_page(pfn, PMD_SIZE);

		if (exec_fault) {
			new_pmd = kvm_s2pmd_mkexec(new_pmd);
			invalidate_icache_guest_page(pfn, PMD_SIZE);
		} else if (fault_status == FSC_PERM) {
			/* Preserve execute if XN was already cleared */
			if (stage2_is_exec(kvm, fault_ipa))
				new_pmd = kvm_s2pmd_mkexec(new_pmd);
		}

		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = pfn_pte(pfn, mem_type);

		if (writable) {
			new_pte = kvm_s2pte_mkwrite(new_pte);
			kvm_set_pfn_dirty(pfn);
			mark_page_dirty(kvm, gfn);
		}

		if (fault_status != FSC_PERM)
			clean_dcache_guest_page(pfn, PAGE_SIZE);

		if (exec_fault) {
			new_pte = kvm_s2pte_mkexec(new_pte);
			invalidate_icache_guest_page(pfn, PAGE_SIZE);
		} else if (fault_status == FSC_PERM) {
			/* Preserve execute if XN was already cleared */
			if (stage2_is_exec(kvm, fault_ipa))
				new_pte = kvm_s2pte_mkexec(new_pte);
		}

		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_set_pfn_accessed(pfn);
	kvm_release_pfn_clean(pfn);
	return ret;
}

/*
 * Resolve the access fault by making the page young again.
 * Note that because the faulting entry is guaranteed not to be
 * cached in the TLB, we don't need to invalidate anything.
 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
 * so there is no need for atomic (pte|pmd)_mkyoung operations.
 */
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	pmd_t *pmd;
	pte_t *pte;
	kvm_pfn_t pfn;
	bool pfn_valid = false;

	trace_kvm_access_fault(fault_ipa);

	spin_lock(&vcpu->kvm->mmu_lock);

	pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
	if (!pmd || pmd_none(*pmd))	/* Nothing there */
		goto out;

	if (pmd_thp_or_huge(*pmd)) {	/* THP, HugeTLB */
		*pmd = pmd_mkyoung(*pmd);
		pfn = pmd_pfn(*pmd);
		pfn_valid = true;
		goto out;
	}

	pte = pte_offset_kernel(pmd, fault_ipa);
	if (pte_none(*pte))		/* Nothing there either */
		goto out;

	*pte = pte_mkyoung(*pte);	/* Just a page... */
	pfn = pte_pfn(*pte);
	pfn_valid = true;
out:
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (pfn_valid)
		kvm_set_pfn_accessed(pfn);
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean either that
 * the guest simply needs more memory and we must allocate an appropriate
 * page, or that the guest tried to access I/O memory, which is emulated by
 * user space. The distinction is based on the IPA causing the fault and
 * whether this memory region has been registered as standard RAM by user
 * space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	unsigned long hva;
	bool is_iabt, write_fault, writable;
	gfn_t gfn;
	int ret, idx;

	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);

	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

	/* Synchronous External Abort? */
	if (kvm_vcpu_dabt_isextabt(vcpu)) {
		/*
		 * For RAS the host kernel may handle this abort.
		 * There is no need to pass the error into the guest.
		 */
		if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
			return 1;

		if (unlikely(!is_iabt)) {
			kvm_inject_vabt(vcpu);
			return 1;
		}
	}

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
	    fault_status != FSC_ACCESS) {
		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu),
			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
			(unsigned long)kvm_vcpu_get_hsr(vcpu));
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	write_fault = kvm_is_write_fault(vcpu);
	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * Check for a cache maintenance operation. Since we
		 * ended-up here, we know it is outside of any memory
		 * slot. But we can't find out if that is for a device,
		 * or if the guest is just being stupid. The only thing
		 * we know for sure is that this range cannot be cached.
		 *
		 * So let's assume that the guest is just being
		 * cautious, and skip the instruction.
		 */
		if (kvm_vcpu_dabt_is_cm(vcpu)) {
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);

	if (fault_status == FSC_ACCESS) {
		handle_access_fault(vcpu, fault_ipa);
		ret = 1;
		goto out_unlock;
	}

	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}
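
/*
 * Walk all memslots that overlap the HVA range [start, end), clip the range
 * to each slot, and call @handler on the corresponding guest physical range.
 * The individual handler return values are OR'ed together and returned.
 */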
static int handle_hva_to_gpa(struct kvm *kvm,
			     unsigned long start,
			     unsigned long end,
			     int (*handler)(struct kvm *kvm,
					    gpa_t gpa, u64 size,
					    void *data),
			     void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int ret = 0;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gpa;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
	}

	return ret;
}

static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	unmap_stage2_range(kvm, gpa, size);
	return 0;
}
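
/*
 * MMU notifier callback: the host is unmapping (or changing) the HVA range
 * [start, end), so tear down any stage-2 mappings that shadow it. Nothing to
 * do if no stage-2 page table has been allocated for this VM yet.
 */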
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	pte_t *pte = (pte_t *)data;

	WARN_ON(size != PAGE_SIZE);
	/*
	 * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
	 * flag clear because MMU notifiers will have unmapped a huge PMD before
	 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
	 * therefore stage2_set_pte() never needs to clear out a huge PMD
	 * through this calling path.
	 */
	stage2_set_pte(kvm, NULL, gpa, pte, 0);
	return 0;
}
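
/*
 * MMU notifier ->change_pte() callback: the host has replaced the page
 * backing @hva (typically after CoW), so install the new pfn in the stage-2
 * tables of every memslot that maps this single page.
 */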
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	kvm_pfn_t pfn = pte_pfn(pte);
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);

	/*
	 * We've moved a page around, probably through CoW, so let's treat it
	 * just like a translation fault and clean the cache to the PoC.
	 */
	clean_dcache_guest_page(pfn, PAGE_SIZE);
	stage2_pte = pfn_pte(pfn, PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}
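
/*
 * Test and clear the access flag on the stage-2 PMD or PTE covering @gpa,
 * returning non-zero if the entry was young.
 */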
static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	pmd_t *pmd;
	pte_t *pte;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
	pmd = stage2_get_pmd(kvm, NULL, gpa);
	if (!pmd || pmd_none(*pmd))	/* Nothing there */
		return 0;

	if (pmd_thp_or_huge(*pmd))	/* THP, HugeTLB */
		return stage2_pmdp_test_and_clear_young(pmd);

	pte = pte_offset_kernel(pmd, gpa);
	if (pte_none(*pte))
		return 0;

	return stage2_ptep_test_and_clear_young(pte);
}
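
/*
 * Like kvm_age_hva_handler(), but only report whether the entry is young,
 * without clearing the access flag.
 */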
static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	pmd_t *pmd;
	pte_t *pte;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
	pmd = stage2_get_pmd(kvm, NULL, gpa);
	if (!pmd || pmd_none(*pmd))	/* Nothing there */
		return 0;

	if (pmd_thp_or_huge(*pmd))	/* THP, HugeTLB */
		return pmd_young(*pmd);

	pte = pte_offset_kernel(pmd, gpa);
	if (!pte_none(*pte))		/* Just a page... */
		return pte_young(*pte);

	return 0;
}
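
/*
 * MMU notifier ageing callbacks: clear-and-report (kvm_age_hva) or simply
 * report (kvm_test_age_hva) the stage-2 access flags for the given HVA
 * range, so the host can age pages mapped into the guest like any others.
 */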
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;
	trace_kvm_age_hva(start, end);
	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.pgd)
		return 0;
	trace_kvm_test_age_hva(hva);
	return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
				 kvm_test_age_hva_handler, NULL);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
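
/*
 * Return the physical address of the PGD that the HYP translation table
 * base register should point at: the merged boot+runtime PGD when the
 * extended idmap is in use, the regular hyp PGD otherwise.
 */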
phys_addr_t kvm_mmu_get_httbr(void)
{
	if (__kvm_cpu_uses_extended_idmap())
		return virt_to_phys(merged_hyp_pgd);
	else
		return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

static int kvm_map_idmap_text(pgd_t *pgd)
{
	int err;

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP_EXEC);
	if (err)
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);

	return err;
}
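
/*
 * One-time initialisation of the HYP page tables: compute the physical
 * range of the HYP idmap text, check that it does not clash with the HYP VA
 * range, allocate the hyp PGD(s) and map the idmap text into them.
 */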
int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
	hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
	hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);

	/*
	 * We rely on the linker script to ensure at build time that the HYP
	 * init code does not cross a page boundary.
	 */
	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);

	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
	kvm_debug("HYP VA range: %lx:%lx\n",
		  kern_hyp_va(PAGE_OFFSET),
		  kern_hyp_va((unsigned long)high_memory - 1));

	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
	    hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
	    hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
		/*
		 * The idmap page is intersecting with the VA space,
		 * it is not safe to continue further.
		 */
		kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
		err = -EINVAL;
		goto out;
	}

	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
	if (!hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	if (__kvm_cpu_uses_extended_idmap()) {
		boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
							 hyp_pgd_order);
		if (!boot_hyp_pgd) {
			kvm_err("Hyp boot PGD not allocated\n");
			err = -ENOMEM;
			goto out;
		}

		err = kvm_map_idmap_text(boot_hyp_pgd);
		if (err)
			goto out;

		merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		if (!merged_hyp_pgd) {
			kvm_err("Failed to allocate extra HYP pgd\n");
			err = -ENOMEM;
			goto out;
		}
		__kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
				    hyp_idmap_start);
	} else {
		err = kvm_map_idmap_text(hyp_pgd);
		if (err)
			goto out;
	}

	io_map_base = hyp_idmap_start;
	return 0;
out:
	free_hyp_pgds();
	return err;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
		kvm_mmu_wp_memory_region(kvm, mem->slot);
}
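
/*
 * Validate a new or moved memslot before it is committed: reject slots that
 * reach beyond the guest IPA space, refuse writable slots backed by
 * read-only VMAs, and map any VM_PFNMAP (device) VMAs covered by the slot
 * straight into stage 2.
 */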
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	hva_t hva = mem->userspace_addr;
	hva_t reg_end = hva + mem->memory_size;
	bool writable = !(mem->flags & KVM_MEM_READONLY);
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
			change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the IPA
	 * space addressable by the KVM guest.
	 */
	if (memslot->base_gfn + memslot->npages >=
	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
		return -EFAULT;

	down_read(&current->mm->mmap_sem);
	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we can map
	 * any of them right now.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Mapping a read-only VMA is only allowed if the
		 * memory region is configured as read-only.
		 */
		if (writable && !(vma->vm_flags & VM_WRITE)) {
			ret = -EPERM;
			break;
		}

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			gpa_t gpa = mem->guest_phys_addr +
				    (vm_start - mem->userspace_addr);
			phys_addr_t pa;

			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
			pa += vm_start - vma->vm_start;

			/* IO region dirty page logging not allowed */
			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				goto out;
			}

			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
						    vm_end - vm_start,
						    writable);
			if (ret)
				break;
		}
		hva = vm_end;
	} while (hva < reg_end);

	if (change == KVM_MR_FLAGS_ONLY)
		goto out;

	spin_lock(&kvm->mmu_lock);
	if (ret)
		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
	else
		stage2_flush_memslot(kvm, memslot);
	spin_unlock(&kvm->mmu_lock);
out:
	up_read(&current->mm->mmap_sem);
	return ret;
}
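
/*
 * No per-memslot arch metadata is kept beyond the generic structures, so
 * the memslot allocate/free/update hooks below have nothing to do.
 */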
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_free_stage2_pgd(kvm);
}
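
/*
 * Called when a memslot is deleted or moved: drop the stage-2 mappings for
 * the old GPA range under the mmu_lock so the guest can no longer use them.
 */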
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	unmap_stage2_range(kvm, gpa, size);
	spin_unlock(&kvm->mmu_lock);
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 *
 * Main problems:
 * - S/W ops are local to a CPU (not broadcast)
 * - We have line migration behind our back (speculation)
 * - System caches don't support S/W at all (damn!)
 *
 * In the face of the above, the best we can do is to try and convert
 * S/W ops to VA ops. Because the guest is not allowed to infer the
 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
 * which is a rather good thing for us.
 *
 * Also, it is only used when turning caches on/off ("The expected
 * usage of the cache maintenance instructions that operate by set/way
 * is associated with the cache maintenance instructions associated
 * with the powerdown and powerup of caches, if this is required by
 * the implementation.").
 *
 * We use the following policy:
 *
 * - If we trap a S/W operation, we enable VM trapping to detect
 *   caches being turned on/off, and do a full clean.
 *
 * - We flush the caches both when they are being turned on and when they
 *   are being turned off.
 *
 * - Once the caches are enabled, we stop trapping VM ops.
 */
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
	unsigned long hcr = *vcpu_hcr(vcpu);

	/*
	 * If this is the first time we do a S/W operation
	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
	 * VM trapping.
	 *
	 * Otherwise, rely on the VM trapping to wait for the MMU +
	 * Caches to be turned off. At that point, we'll be able to
	 * clean the caches again.
	 */
	if (!(hcr & HCR_TVM)) {
		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
					vcpu_has_cache_enabled(vcpu));
		stage2_flush_vm(vcpu->kvm);
		*vcpu_hcr(vcpu) = hcr | HCR_TVM;
	}
}

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/*
	 * If switching the MMU+caches on, need to invalidate the caches.
	 * If switching it off, need to clean the caches.
	 * Clean + invalidate does the trick always.
	 */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* Caches are now on, stop trapping VM ops (until a S/W op) */
	if (now_enabled)
		*vcpu_hcr(vcpu) &= ~HCR_TVM;

	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}