
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT
.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register
	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]
	mrs	x19, sp_el0
	mrs	x20, elr_el2	// pc before entering el2
	mrs	x21, spsr_el2	// pstate before entering el2
	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]
	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1
	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register
	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24
	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]
	msr	sp_el0, x19
	msr	elr_el2, x20	// pc on return from el2
	msr	spsr_el2, x21	// pstate on return from el2
	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm
.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack
	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]
	pop	x6, x7		// x2, x3
	pop	x4, x5		// x0, x1
	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register
	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7
	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]
	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs
	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1
	// Do not touch any register after this!
.endm
/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register
	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1
	mrs	x24, par_el1
	mrs	x25, mdscr_el1
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	stp	x24, x25, [x3, #160]
.endm
.macro save_debug
	// x2: base address for cpu context
	// x3: tmp register
	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24	// How many BPs to skip
	sub	w25, w26, w25	// How many WPs to skip
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	mrs	x20, dbgbcr15_el1
	mrs	x19, dbgbcr14_el1
	mrs	x18, dbgbcr13_el1
	mrs	x17, dbgbcr12_el1
	mrs	x16, dbgbcr11_el1
	mrs	x15, dbgbcr10_el1
	mrs	x14, dbgbcr9_el1
	mrs	x13, dbgbcr8_el1
	mrs	x12, dbgbcr7_el1
	mrs	x11, dbgbcr6_el1
	mrs	x10, dbgbcr5_el1
	mrs	x9, dbgbcr4_el1
	mrs	x8, dbgbcr3_el1
	mrs	x7, dbgbcr2_el1
	mrs	x6, dbgbcr1_el1
	mrs	x5, dbgbcr0_el1
	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	mrs	x20, dbgbvr15_el1
	mrs	x19, dbgbvr14_el1
	mrs	x18, dbgbvr13_el1
	mrs	x17, dbgbvr12_el1
	mrs	x16, dbgbvr11_el1
	mrs	x15, dbgbvr10_el1
	mrs	x14, dbgbvr9_el1
	mrs	x13, dbgbvr8_el1
	mrs	x12, dbgbvr7_el1
	mrs	x11, dbgbvr6_el1
	mrs	x10, dbgbvr5_el1
	mrs	x9, dbgbvr4_el1
	mrs	x8, dbgbvr3_el1
	mrs	x7, dbgbvr2_el1
	mrs	x6, dbgbvr1_el1
	mrs	x5, dbgbvr0_el1
	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	mrs	x20, dbgwcr15_el1
	mrs	x19, dbgwcr14_el1
	mrs	x18, dbgwcr13_el1
	mrs	x17, dbgwcr12_el1
	mrs	x16, dbgwcr11_el1
	mrs	x15, dbgwcr10_el1
	mrs	x14, dbgwcr9_el1
	mrs	x13, dbgwcr8_el1
	mrs	x12, dbgwcr7_el1
	mrs	x11, dbgwcr6_el1
	mrs	x10, dbgwcr5_el1
	mrs	x9, dbgwcr4_el1
	mrs	x8, dbgwcr3_el1
	mrs	x7, dbgwcr2_el1
	mrs	x6, dbgwcr1_el1
	mrs	x5, dbgwcr0_el1
	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	mrs	x20, dbgwvr15_el1
	mrs	x19, dbgwvr14_el1
	mrs	x18, dbgwvr13_el1
	mrs	x17, dbgwvr12_el1
	mrs	x16, dbgwvr11_el1
	mrs	x15, dbgwvr10_el1
	mrs	x14, dbgwvr9_el1
	mrs	x13, dbgwvr8_el1
	mrs	x12, dbgwvr7_el1
	mrs	x11, dbgwvr6_el1
	mrs	x10, dbgwvr5_el1
	mrs	x9, dbgwvr4_el1
	mrs	x8, dbgwvr3_el1
	mrs	x7, dbgwvr2_el1
	mrs	x6, dbgwvr1_el1
	mrs	x5, dbgwvr0_el1
	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]
	mrs	x21, mdccint_el1
	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
.endm
.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register
	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldp	x24, x25, [x3, #160]
	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
	msr	par_el1, x24
	msr	mdscr_el1, x25
.endm
.macro restore_debug
	// x2: base address for cpu context
	// x3: tmp register
	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24	// How many BPs to skip
	sub	w25, w26, w25	// How many WPs to skip
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]
	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	msr	dbgbcr15_el1, x20
	msr	dbgbcr14_el1, x19
	msr	dbgbcr13_el1, x18
	msr	dbgbcr12_el1, x17
	msr	dbgbcr11_el1, x16
	msr	dbgbcr10_el1, x15
	msr	dbgbcr9_el1, x14
	msr	dbgbcr8_el1, x13
	msr	dbgbcr7_el1, x12
	msr	dbgbcr6_el1, x11
	msr	dbgbcr5_el1, x10
	msr	dbgbcr4_el1, x9
	msr	dbgbcr3_el1, x8
	msr	dbgbcr2_el1, x7
	msr	dbgbcr1_el1, x6
	msr	dbgbcr0_el1, x5
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]
	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	msr	dbgbvr15_el1, x20
	msr	dbgbvr14_el1, x19
	msr	dbgbvr13_el1, x18
	msr	dbgbvr12_el1, x17
	msr	dbgbvr11_el1, x16
	msr	dbgbvr10_el1, x15
	msr	dbgbvr9_el1, x14
	msr	dbgbvr8_el1, x13
	msr	dbgbvr7_el1, x12
	msr	dbgbvr6_el1, x11
	msr	dbgbvr5_el1, x10
	msr	dbgbvr4_el1, x9
	msr	dbgbvr3_el1, x8
	msr	dbgbvr2_el1, x7
	msr	dbgbvr1_el1, x6
	msr	dbgbvr0_el1, x5
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]
	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	msr	dbgwcr15_el1, x20
	msr	dbgwcr14_el1, x19
	msr	dbgwcr13_el1, x18
	msr	dbgwcr12_el1, x17
	msr	dbgwcr11_el1, x16
	msr	dbgwcr10_el1, x15
	msr	dbgwcr9_el1, x14
	msr	dbgwcr8_el1, x13
	msr	dbgwcr7_el1, x12
	msr	dbgwcr6_el1, x11
	msr	dbgwcr5_el1, x10
	msr	dbgwcr4_el1, x9
	msr	dbgwcr3_el1, x8
	msr	dbgwcr2_el1, x7
	msr	dbgwcr1_el1, x6
	msr	dbgwcr0_el1, x5
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]
	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	msr	dbgwvr15_el1, x20
	msr	dbgwvr14_el1, x19
	msr	dbgwvr13_el1, x18
	msr	dbgwvr12_el1, x17
	msr	dbgwvr11_el1, x16
	msr	dbgwvr10_el1, x15
	msr	dbgwvr9_el1, x14
	msr	dbgwvr8_el1, x13
	msr	dbgwvr7_el1, x12
	msr	dbgwvr6_el1, x11
	msr	dbgwvr5_el1, x10
	msr	dbgwvr4_el1, x9
	msr	dbgwvr3_el1, x8
	msr	dbgwvr2_el1, x7
	msr	dbgwvr1_el1, x6
	msr	dbgwvr0_el1, x5
	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
	msr	mdccint_el1, x21
.endm
.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm

.macro skip_debug_state tmp, target
	ldr	\tmp, [x0, #VCPU_DEBUG_FLAGS]
	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.endm
.macro compute_debug_state target
	// Compute debug state: if any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
	// is set, we do a full save/restore cycle and disable trapping.
	add	x25, x0, #VCPU_CONTEXT
	// Check the state of MDSCR_EL1
	ldr	x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
	and	x26, x25, #DBG_MDSCR_KDE
	and	x25, x25, #DBG_MDSCR_MDE
	adds	xzr, x25, x26
	b.eq	9998f		// Nothing to see there
	// If any interesting bits were set, we must set the flag
	mov	x26, #KVM_ARM64_DEBUG_DIRTY
	str	x26, [x0, #VCPU_DEBUG_FLAGS]
	b	9999f		// Don't skip restore
9998:
	// Otherwise load the flags from memory in case we recently
	// trapped
	skip_debug_state x25, \target
9999:
.endm
.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	mrs	x4, spsr_abt
	mrs	x5, spsr_und
	mrs	x6, spsr_irq
	mrs	x7, spsr_fiq
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	mrs	x4, dacr32_el2
	mrs	x5, ifsr32_el2
	mrs	x6, fpexc32_el2
	stp	x4, x5, [x3]
	str	x6, [x3, #16]

	skip_debug_state x8, 2f
	mrs	x7, dbgvcr32_el2
	str	x7, [x3, #24]
2:
	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	mrs	x4, teecr32_el1
	mrs	x5, teehbr32_el1
	stp	x4, x5, [x3]
1:
.endm

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	spsr_abt, x4
	msr	spsr_und, x5
	msr	spsr_irq, x6
	msr	spsr_fiq, x7

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x4, x5, [x3]
	ldr	x6, [x3, #16]
	msr	dacr32_el2, x4
	msr	ifsr32_el2, x5
	msr	fpexc32_el2, x6

	skip_debug_state x8, 2f
	ldr	x7, [x3, #24]
	msr	dbgvcr32_el2, x7
2:
	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	ldp	x4, x5, [x3]
	msr	teecr32_el1, x4
	msr	teehbr32_el1, x5
1:
.endm
.macro activate_traps
	ldr	x2, [x0, #VCPU_HCR_EL2]
	msr	hcr_el2, x2
	mov	x2, #CPTR_EL2_TTA
	msr	cptr_el2, x2

	mov	x2, #(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	orr	x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)

	// Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
	// if not dirty.
	ldr	x3, [x0, #VCPU_DEBUG_FLAGS]
	tbnz	x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
	orr	x2, x2, #MDCR_EL2_TDA
1:
	msr	mdcr_el2, x2
.endm

.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	cptr_el2, xzr
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm
/*
 * Call into the vgic backend for state saving
 */
.macro save_vgic_state
	alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
	mrs	x24, hcr_el2
	mov	x25, #HCR_INT_OVERRIDE
	neg	x25, x25
	and	x24, x24, x25
	msr	hcr_el2, x24
.endm

/*
 * Call into the vgic backend for state restoring
 */
.macro restore_vgic_state
	mrs	x24, hcr_el2
	ldr	x25, [x0, #VCPU_IRQ_LINES]
	orr	x24, x24, #HCR_INT_OVERRIDE
	orr	x24, x24, x25
	msr	hcr_el2, x24
	alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
.endm

.macro save_timer_state
	// x0: vcpu pointer
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	mrs	x3, cntv_ctl_el0
	and	x3, x3, #3
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable
	msr	cntv_ctl_el0, x3

	isb

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm

.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2
	isb

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	and	x2, x2, #3
	msr	cntv_ctl_el0, x2
1:
.endm
__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

__save_debug:
	save_debug
	ret

__restore_debug:
	restore_debug
	ret

__save_fpsimd:
	save_fpsimd
	ret

__restore_fpsimd:
	restore_fpsimd
	ret
/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl __save_fpsimd
	bl __save_sysregs

	compute_debug_state 1f
	bl	__save_debug
1:
	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	bl __restore_sysregs
	bl __restore_fpsimd

	skip_debug_state x3, 1f
	bl	__restore_debug
1:
	restore_guest_32bit_state
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl __save_fpsimd
	bl __save_sysregs

	skip_debug_state x3, 1f
	bl	__save_debug
1:
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs
	bl __restore_fpsimd

	skip_debug_state x3, 1f
	// Clear the dirty flag for the next run, as all the state has
	// already been saved. Note that we nuke the whole 64bit word.
	// If we ever add more flags, we'll have to be more careful...
	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
	bl	__restore_debug
1:
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)
// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	lsr	x1, x1, #12
	tlbi	ipas2e1is, x1
	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb	ish
	tlbi	vmalle1is
	dsb	ish
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

/**
 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 * @struct kvm *kvm - pointer to kvm structure
 *
 * Invalidates all Stage 1 and 2 TLB entries for current VMID.
 */
ENTRY(__kvm_tlb_flush_vmid)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	tlbi	vmalls12e1is
	dsb	ish
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid)

ENTRY(__kvm_flush_vm_context)
	dsb	ishst
	tlbi	alle1is
	ic	ialluis
	dsb	ish
	ret
ENDPROC(__kvm_flush_vm_context)
__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2
	bl __restore_sysregs

1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	mrs	x6, par_el1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align	2
/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed). The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
 * passed in x0 and x1.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)
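
/*
 * Usage sketch (illustrative only, not part of the original file): from host
 * C code, the HVC above is reached through the kvm_call_hyp() prototype
 * documented in the preceding comment, for example to invoke the world
 * switch defined earlier in this file. The local variable names below are
 * made up for the example; only kvm_call_hyp() and __kvm_vcpu_run() are
 * real symbols.
 *
 *	struct kvm_vcpu *vcpu;		// kernel VA; EL2 converts it with kern_hyp_va
 *	u64 exit_code;
 *
 *	// The function pointer goes in x0; up to three further arguments are
 *	// shuffled into x0-x2 by the el1_sync handler below.
 *	exit_code = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 *	// A NULL function pointer instead returns the EL2 vector base
 *	// (see the __hyp_get_vectors check in el1_sync below).
 */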
.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:					// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_ELx_EC_SHIFT

	cmp	x2, #ESR_ELx_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	/* Check for __hyp_get_vectors */
	cbnz	x0, 1f
	mrs	x0, vbar_el2
	b	2f

1:	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
2:	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
	cmp	x2, #ESR_ELx_EC_DABT_LOW
	mov	x0, #ESR_ELx_EC_IABT_LOW
	ccmp	x2, x0, #4, ne
	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_ELx_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f		// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	w1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2h_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

	.popsection