/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
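
/*
 * Illustration of the CSSELR encoding used above (and relied upon by
 * is_valid_cache() further down): bit 0 selects an instruction cache (1)
 * or a data/unified cache (0), and bits [3:1] hold the cache level minus
 * one. For example, csselr == 0 reads the L1 data/unified CCSIDR, while
 * csselr == 3 reads the L2 instruction CCSIDR.
 */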
  64. /*
  65. * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
  66. */
  67. static bool access_dcsw(struct kvm_vcpu *vcpu,
  68. const struct sys_reg_params *p,
  69. const struct sys_reg_desc *r)
  70. {
  71. if (!p->is_write)
  72. return read_from_write_only(vcpu, p);
  73. kvm_set_way_flush(vcpu);
  74. return true;
  75. }
  76. /*
  77. * Generic accessor for VM registers. Only called as long as HCR_TVM
  78. * is set. If the guest enables the MMU, we stop trapping the VM
  79. * sys_regs and leave it in complete control of the caches.
  80. */
  81. static bool access_vm_reg(struct kvm_vcpu *vcpu,
  82. const struct sys_reg_params *p,
  83. const struct sys_reg_desc *r)
  84. {
  85. unsigned long val;
  86. bool was_enabled = vcpu_has_cache_enabled(vcpu);
  87. BUG_ON(!p->is_write);
  88. val = *vcpu_reg(vcpu, p->Rt);
  89. if (!p->is_aarch32) {
  90. vcpu_sys_reg(vcpu, r->reg) = val;
  91. } else {
  92. if (!p->is_32bit)
  93. vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
  94. vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
  95. }
  96. kvm_toggle_cache(vcpu, was_enabled);
  97. return true;
  98. }
  99. /*
  100. * Trap handler for the GICv3 SGI generation system register.
  101. * Forward the request to the VGIC emulation.
  102. * The cp15_64 code makes sure this automatically works
  103. * for both AArch64 and AArch32 accesses.
  104. */
  105. static bool access_gic_sgi(struct kvm_vcpu *vcpu,
  106. const struct sys_reg_params *p,
  107. const struct sys_reg_desc *r)
  108. {
  109. u64 val;
  110. if (!p->is_write)
  111. return read_from_write_only(vcpu, p);
  112. val = *vcpu_reg(vcpu, p->Rt);
  113. vgic_v3_dispatch_sgi(vcpu, val);
  114. return true;
  115. }
  116. static bool trap_raz_wi(struct kvm_vcpu *vcpu,
  117. const struct sys_reg_params *p,
  118. const struct sys_reg_desc *r)
  119. {
  120. if (p->is_write)
  121. return ignore_write(vcpu, p);
  122. else
  123. return read_zero(vcpu, p);
  124. }
  125. static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
  126. const struct sys_reg_params *p,
  127. const struct sys_reg_desc *r)
  128. {
  129. if (p->is_write) {
  130. return ignore_write(vcpu, p);
  131. } else {
  132. *vcpu_reg(vcpu, p->Rt) = (1 << 3);
  133. return true;
  134. }
  135. }
  136. static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
  137. const struct sys_reg_params *p,
  138. const struct sys_reg_desc *r)
  139. {
  140. if (p->is_write) {
  141. return ignore_write(vcpu, p);
  142. } else {
  143. u32 val;
  144. asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
  145. *vcpu_reg(vcpu, p->Rt) = val;
  146. return true;
  147. }
  148. }
  149. /*
  150. * We want to avoid world-switching all the DBG registers all the
  151. * time:
  152. *
  153. * - If we've touched any debug register, it is likely that we're
  154. * going to touch more of them. It then makes sense to disable the
  155. * traps and start doing the save/restore dance
  156. * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
  157. * then mandatory to save/restore the registers, as the guest
  158. * depends on them.
  159. *
  160. * For this, we use a DIRTY bit, indicating the guest has modified the
  161. * debug registers, used as follow:
  162. *
  163. * On guest entry:
  164. * - If the dirty bit is set (because we're coming back from trapping),
  165. * disable the traps, save host registers, restore guest registers.
  166. * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
  167. * set the dirty bit, disable the traps, save host registers,
  168. * restore guest registers.
  169. * - Otherwise, enable the traps
  170. *
  171. * On guest exit:
  172. * - If the dirty bit is set, save guest registers, restore host
  173. * registers and clear the dirty bit. This ensure that the host can
  174. * now use the debug registers.
  175. */
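
/*
 * A rough sketch of the entry-time decision described above (the actual
 * world-switch logic lives in the hyp code; this is only an illustration):
 *
 *	if ((vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) ||
 *	    (mdscr_el1 & (DBG_MDSCR_KDE | DBG_MDSCR_MDE)))
 *		save host debug regs, restore guest debug regs, disable traps
 *	else
 *		leave the traps enabled
 */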
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    const struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
	}

	return true;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
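	/*
	 * Worked example of the mapping below: vcpu_id 21 (0b10101) gives
	 * Aff0 = 5 and Aff1 = 1, i.e. MPIDR_EL1 = (1 << 31) | (1 << 8) | 5,
	 * assuming the usual 8-bit affinity fields at bits 0, 8 and 16.
	 */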
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 *
 * Debug handling: We do trap most, if not all, debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug interface, nor the
 * OSLock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	/* MDCCINT_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	/* MDSCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	/* MDRAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  trap_raz_wi },
	/* OSLAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
	  trap_raz_wi },
	/* OSLSR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
	  trap_oslsr_el1 },
	/* OSDLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
	  trap_raz_wi },
	/* DBGPRCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
	  trap_raz_wi },
	/* DBGCLAIMSET_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
	  trap_raz_wi },
	/* DBGCLAIMCLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
	  trap_raz_wi },
	/* DBGAUTHSTATUS_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
	  trap_dbgauthstatus_el1 },

	/* TEECR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEECR32_EL1, 0 },
	/* TEEHBR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEEHBR32_EL1, 0 },

	/* MDCCSR_EL1 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR[TR]X_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
	  trap_raz_wi },

	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  trap_raz_wi },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  trap_raz_wi },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },

	/* ICC_SGI1R_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
	  access_gic_sgi },
	/* ICC_SRE_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },

	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  trap_raz_wi },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  trap_raz_wi },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  trap_raz_wi },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  trap_raz_wi },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  trap_raz_wi },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  trap_raz_wi },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  trap_raz_wi },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  trap_raz_wi },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  trap_raz_wi },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  trap_raz_wi },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  trap_raz_wi },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  trap_raz_wi },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
		u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
		u32 el3 = !!((pfr >> 12) & 0xf);
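
		/*
		 * Compose a 32bit DBGIDR value out of the AArch64 ID
		 * registers: WRPs[31:28], BRPs[27:24] and CTX_CMPs[23:20]
		 * come from ID_AA64DFR0_EL1, the debug architecture version
		 * field is reported as 6 (ARMv8), and bits 14/12
		 * (nSUHD_imp/SE_imp) reflect whether EL3 is implemented.
		 */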
		*vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
					  (((dfr >> 12) & 0xf) << 24) |
					  (((dfr >> 28) & 0xf) << 20) |
					  (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

static bool trap_debug32(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		*vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)					\
	/* DBGBVRn */						\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32,	\
	  NULL, (cp14_DBGBVR0 + (n) * 2) },			\
	/* DBGBCRn */						\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32,	\
	  NULL, (cp14_DBGBCR0 + (n) * 2) },			\
	/* DBGWVRn */						\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32,	\
	  NULL, (cp14_DBGWVR0 + (n) * 2) },			\
	/* DBGWCRn */						\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32,	\
	  NULL, (cp14_DBGWCR0 + (n) * 2) }

#define DBGBXVR(n)						\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32,	\
	  NULL, cp14_DBGBXVR0 + n * 2 }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct sys_reg_desc *r = &table[i];

		if (params->Op0 != r->Op0)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               calls the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      const struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		}

		/* Handled */
		return 0;
	}

	/* Not handled */
	return -1;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON((cp = -1));
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt2 = (hsr >> 10) & 0xf;
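
	/*
	 * The MCRR/MRRC ISS encodes CRm in hsr[4:1], Rt in hsr[9:5],
	 * Rt2 in hsr[14:10] and Opc1 in hsr[19:16] (4 bits are enough
	 * for an AArch32 GPR number); bit 0 is clear for a write to
	 * the CP register.
	 */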
	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Massive hack here. Store Rt2 in the top 32bits so we only
	 * have one register to deal with. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
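	/*
	 * For example, for a 64bit TTBR0 write such as
	 * "mcrr p15, 0, r2, r3, c2", Rt == 2 and Rt2 == 3, and the trap
	 * handler sees the full 64bit value (r3 << 32) | r2 in what it
	 * believes is a single register.
	 */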
	if (params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val &= 0xffffffff;
		val |= *vcpu_reg(vcpu, Rt2) << 32;
		*vcpu_reg(vcpu, params.Rt) = val;
	}

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		goto out;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		goto out;

	unhandled_cp_access(vcpu, &params);

out:
	/* Do the opposite hack for the read side */
	if (!params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val >>= 32;
		*vcpu_reg(vcpu, Rt2) = val;
	}

	return 1;
}

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
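
	/*
	 * MCR/MRC ISS layout, as extracted below: Opc2 at hsr[19:17],
	 * Opc1 at hsr[16:14], CRn at hsr[13:10], Rt at hsr[9:5], CRm at
	 * hsr[4:1], with bit 0 clear for a write to the CP register.
	 */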
	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		return 1;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		return 1;

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);

	params.is_aarch32 = false;
	params.is_32bit = false;
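
	/*
	 * MSR/MRS ISS layout, as extracted below: Op0 at esr[21:20], Op2
	 * at esr[19:17], Op1 at esr[16:14], CRn at esr[13:10], Rt at
	 * esr[9:5], CRm at esr[4:1], with bit 0 clear for a write (MSR)
	 * and set for a read (MRS).
	 */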
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.Rt = (esr >> 5) & 0x1f;
	params.is_write = !(esr & 1);

	return emulate_sys_reg(vcpu, &params);
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
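
/*
 * For example, SCTLR_EL1 (Op0 3, Op1 0, CRn 1, CRm 0, Op2 0) is exposed
 * to userspace as KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG
 * | (3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT)
 * | (1 << KVM_REG_ARM64_SYSREG_CRN_SHIFT),
 * which is exactly what index_to_params() decodes on the way back in.
 */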
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
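	/*
	 * For example, with Ctype1 = 0b011 (separate I and D caches),
	 * Ctype2 = 0b100 (unified) and Ctype3 = 0b000, the loop below
	 * stops at i == 2 and cache_levels ends up as 0b100011.
	 */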
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_sys_regs - sets system registers to their reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in a reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}