sys_regs.c
  1. /*
  2. * Copyright (C) 2012,2013 - ARM Ltd
  3. * Author: Marc Zyngier <marc.zyngier@arm.com>
  4. *
  5. * Derived from arch/arm/kvm/coproc.c:
  6. * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  7. * Authors: Rusty Russell <rusty@rustcorp.com.au>
  8. * Christoffer Dall <c.dall@virtualopensystems.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License, version 2, as
  12. * published by the Free Software Foundation.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  21. */
  22. #include <linux/bsearch.h>
  23. #include <linux/kvm_host.h>
  24. #include <linux/mm.h>
  25. #include <linux/uaccess.h>
  26. #include <asm/cacheflush.h>
  27. #include <asm/cputype.h>
  28. #include <asm/debug-monitors.h>
  29. #include <asm/esr.h>
  30. #include <asm/kvm_arm.h>
  31. #include <asm/kvm_asm.h>
  32. #include <asm/kvm_coproc.h>
  33. #include <asm/kvm_emulate.h>
  34. #include <asm/kvm_host.h>
  35. #include <asm/kvm_mmu.h>
  36. #include <asm/perf_event.h>
  37. #include <asm/sysreg.h>
  38. #include <trace/events/kvm.h>
  39. #include "sys_regs.h"
  40. #include "trace.h"
  41. /*
  42. * All of this file is extremely similar to the ARM coproc.c, but the
  43. * types are different. My gut feeling is that it should be pretty
  44. * easy to merge, but that would be an ABI breakage -- again. VFP
  45. * would also need to be abstracted.
  46. *
  47. * For AArch32, we only take care of what is being trapped. Anything
  48. * that has to do with init and userspace access has to go via the
  49. * 64bit interface.
  50. */
  51. static bool read_from_write_only(struct kvm_vcpu *vcpu,
  52. struct sys_reg_params *params,
  53. const struct sys_reg_desc *r)
  54. {
  55. WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
  56. print_sys_reg_instr(params);
  57. kvm_inject_undefined(vcpu);
  58. return false;
  59. }
  60. static bool write_to_read_only(struct kvm_vcpu *vcpu,
  61. struct sys_reg_params *params,
  62. const struct sys_reg_desc *r)
  63. {
  64. WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
  65. print_sys_reg_instr(params);
  66. kvm_inject_undefined(vcpu);
  67. return false;
  68. }
  69. /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
  70. static u32 cache_levels;
  71. /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
  72. #define CSSELR_MAX 12
  73. /* Which cache CCSIDR represents depends on CSSELR value. */
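/*
 * Note: CSSELR_EL1 bits [3:1] select the cache level and bit 0 selects
 * instruction vs. data/unified, e.g. 0 = L1 data/unified, 1 = L1
 * instruction, 2 = L2 data/unified.
 */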
  74. static u32 get_ccsidr(u32 csselr)
  75. {
  76. u32 ccsidr;
  77. /* Make sure no one else changes CSSELR during this! */
  78. local_irq_disable();
  79. write_sysreg(csselr, csselr_el1);
  80. isb();
  81. ccsidr = read_sysreg(ccsidr_el1);
  82. local_irq_enable();
  83. return ccsidr;
  84. }
  85. /*
  86. * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
  87. */
  88. static bool access_dcsw(struct kvm_vcpu *vcpu,
  89. struct sys_reg_params *p,
  90. const struct sys_reg_desc *r)
  91. {
  92. if (!p->is_write)
  93. return read_from_write_only(vcpu, p, r);
  94. kvm_set_way_flush(vcpu);
  95. return true;
  96. }
  97. /*
  98. * Generic accessor for VM registers. Only called as long as HCR_TVM
  99. * is set. If the guest enables the MMU, we stop trapping the VM
  100. * sys_regs and leave it in complete control of the caches.
  101. */
  102. static bool access_vm_reg(struct kvm_vcpu *vcpu,
  103. struct sys_reg_params *p,
  104. const struct sys_reg_desc *r)
  105. {
  106. bool was_enabled = vcpu_has_cache_enabled(vcpu);
  107. BUG_ON(!p->is_write);
  108. if (!p->is_aarch32) {
  109. vcpu_sys_reg(vcpu, r->reg) = p->regval;
  110. } else {
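/*
 * AArch32 accesses are stored in the vcpu's cp15 copy; a 64-bit
 * access also updates the upper word of the register pair.
 */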
  111. if (!p->is_32bit)
  112. vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
  113. vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
  114. }
  115. kvm_toggle_cache(vcpu, was_enabled);
  116. return true;
  117. }
  118. /*
  119. * Trap handler for the GICv3 SGI generation system register.
  120. * Forward the request to the VGIC emulation.
  121. * The cp15_64 code makes sure this automatically works
  122. * for both AArch64 and AArch32 accesses.
  123. */
  124. static bool access_gic_sgi(struct kvm_vcpu *vcpu,
  125. struct sys_reg_params *p,
  126. const struct sys_reg_desc *r)
  127. {
  128. if (!p->is_write)
  129. return read_from_write_only(vcpu, p, r);
  130. vgic_v3_dispatch_sgi(vcpu, p->regval);
  131. return true;
  132. }
  133. static bool access_gic_sre(struct kvm_vcpu *vcpu,
  134. struct sys_reg_params *p,
  135. const struct sys_reg_desc *r)
  136. {
  137. if (p->is_write)
  138. return ignore_write(vcpu, p);
  139. p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
  140. return true;
  141. }
  142. static bool trap_raz_wi(struct kvm_vcpu *vcpu,
  143. struct sys_reg_params *p,
  144. const struct sys_reg_desc *r)
  145. {
  146. if (p->is_write)
  147. return ignore_write(vcpu, p);
  148. else
  149. return read_zero(vcpu, p);
  150. }
  151. static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
  152. struct sys_reg_params *p,
  153. const struct sys_reg_desc *r)
  154. {
  155. if (p->is_write) {
  156. return ignore_write(vcpu, p);
  157. } else {
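/*
 * Report a fixed OSLSR_EL1 value: (1 << 3) sets OSLM[1], i.e. OS Lock
 * implemented, while OSLK (bit 1) stays clear, i.e. the lock is not held.
 */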
  158. p->regval = (1 << 3);
  159. return true;
  160. }
  161. }
  162. static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
  163. struct sys_reg_params *p,
  164. const struct sys_reg_desc *r)
  165. {
  166. if (p->is_write) {
  167. return ignore_write(vcpu, p);
  168. } else {
  169. p->regval = read_sysreg(dbgauthstatus_el1);
  170. return true;
  171. }
  172. }
  173. /*
  174. * We want to avoid world-switching all the DBG registers all the
  175. * time:
  176. *
  177. * - If we've touched any debug register, it is likely that we're
  178. * going to touch more of them. It then makes sense to disable the
  179. * traps and start doing the save/restore dance
  180. * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
  181. * then mandatory to save/restore the registers, as the guest
  182. * depends on them.
  183. *
  184. * For this, we use a DIRTY bit, indicating the guest has modified the
  185. * debug registers, used as follows:
  186. *
  187. * On guest entry:
  188. * - If the dirty bit is set (because we're coming back from trapping),
  189. * disable the traps, save host registers, restore guest registers.
  190. * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
  191. * set the dirty bit, disable the traps, save host registers,
  192. * restore guest registers.
  193. * - Otherwise, enable the traps
  194. *
  195. * On guest exit:
  196. * - If the dirty bit is set, save guest registers, restore host
  197. * registers and clear the dirty bit. This ensures that the host can
  198. * now use the debug registers.
  199. */
  200. static bool trap_debug_regs(struct kvm_vcpu *vcpu,
  201. struct sys_reg_params *p,
  202. const struct sys_reg_desc *r)
  203. {
  204. if (p->is_write) {
  205. vcpu_sys_reg(vcpu, r->reg) = p->regval;
  206. vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
  207. } else {
  208. p->regval = vcpu_sys_reg(vcpu, r->reg);
  209. }
  210. trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
  211. return true;
  212. }
  213. /*
  214. * reg_to_dbg/dbg_to_reg
  215. *
  216. * A 32-bit write to a debug register leaves the top bits alone.
  217. * A 32-bit read from a debug register only returns the bottom bits.
  218. *
  219. * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
  220. * hyp.S code switches between host and guest values in future.
  221. */
  222. static void reg_to_dbg(struct kvm_vcpu *vcpu,
  223. struct sys_reg_params *p,
  224. u64 *dbg_reg)
  225. {
  226. u64 val = p->regval;
  227. if (p->is_32bit) {
  228. val &= 0xffffffffUL;
  229. val |= ((*dbg_reg >> 32) << 32);
  230. }
  231. *dbg_reg = val;
  232. vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
  233. }
  234. static void dbg_to_reg(struct kvm_vcpu *vcpu,
  235. struct sys_reg_params *p,
  236. u64 *dbg_reg)
  237. {
  238. p->regval = *dbg_reg;
  239. if (p->is_32bit)
  240. p->regval &= 0xffffffffUL;
  241. }
  242. static bool trap_bvr(struct kvm_vcpu *vcpu,
  243. struct sys_reg_params *p,
  244. const struct sys_reg_desc *rd)
  245. {
  246. u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
  247. if (p->is_write)
  248. reg_to_dbg(vcpu, p, dbg_reg);
  249. else
  250. dbg_to_reg(vcpu, p, dbg_reg);
  251. trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
  252. return true;
  253. }
  254. static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  255. const struct kvm_one_reg *reg, void __user *uaddr)
  256. {
  257. __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
  258. if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
  259. return -EFAULT;
  260. return 0;
  261. }
  262. static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  263. const struct kvm_one_reg *reg, void __user *uaddr)
  264. {
  265. __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
  266. if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
  267. return -EFAULT;
  268. return 0;
  269. }
  270. static void reset_bvr(struct kvm_vcpu *vcpu,
  271. const struct sys_reg_desc *rd)
  272. {
  273. vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
  274. }
  275. static bool trap_bcr(struct kvm_vcpu *vcpu,
  276. struct sys_reg_params *p,
  277. const struct sys_reg_desc *rd)
  278. {
  279. u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
  280. if (p->is_write)
  281. reg_to_dbg(vcpu, p, dbg_reg);
  282. else
  283. dbg_to_reg(vcpu, p, dbg_reg);
  284. trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
  285. return true;
  286. }
  287. static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  288. const struct kvm_one_reg *reg, void __user *uaddr)
  289. {
  290. __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
  291. if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
  292. return -EFAULT;
  293. return 0;
  294. }
  295. static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  296. const struct kvm_one_reg *reg, void __user *uaddr)
  297. {
  298. __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
  299. if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
  300. return -EFAULT;
  301. return 0;
  302. }
  303. static void reset_bcr(struct kvm_vcpu *vcpu,
  304. const struct sys_reg_desc *rd)
  305. {
  306. vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
  307. }
  308. static bool trap_wvr(struct kvm_vcpu *vcpu,
  309. struct sys_reg_params *p,
  310. const struct sys_reg_desc *rd)
  311. {
  312. u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
  313. if (p->is_write)
  314. reg_to_dbg(vcpu, p, dbg_reg);
  315. else
  316. dbg_to_reg(vcpu, p, dbg_reg);
  317. trace_trap_reg(__func__, rd->reg, p->is_write,
  318. vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
  319. return true;
  320. }
  321. static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  322. const struct kvm_one_reg *reg, void __user *uaddr)
  323. {
  324. __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
  325. if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
  326. return -EFAULT;
  327. return 0;
  328. }
  329. static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  330. const struct kvm_one_reg *reg, void __user *uaddr)
  331. {
  332. __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
  333. if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
  334. return -EFAULT;
  335. return 0;
  336. }
  337. static void reset_wvr(struct kvm_vcpu *vcpu,
  338. const struct sys_reg_desc *rd)
  339. {
  340. vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
  341. }
  342. static bool trap_wcr(struct kvm_vcpu *vcpu,
  343. struct sys_reg_params *p,
  344. const struct sys_reg_desc *rd)
  345. {
  346. u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
  347. if (p->is_write)
  348. reg_to_dbg(vcpu, p, dbg_reg);
  349. else
  350. dbg_to_reg(vcpu, p, dbg_reg);
  351. trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
  352. return true;
  353. }
  354. static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  355. const struct kvm_one_reg *reg, void __user *uaddr)
  356. {
  357. __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
  358. if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
  359. return -EFAULT;
  360. return 0;
  361. }
  362. static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  363. const struct kvm_one_reg *reg, void __user *uaddr)
  364. {
  365. __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
  366. if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
  367. return -EFAULT;
  368. return 0;
  369. }
  370. static void reset_wcr(struct kvm_vcpu *vcpu,
  371. const struct sys_reg_desc *rd)
  372. {
  373. vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
  374. }
  375. static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
  376. {
  377. vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
  378. }
  379. static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
  380. {
  381. u64 mpidr;
  382. /*
  383. * Map the vcpu_id into the first three affinity level fields of
  384. * the MPIDR. We limit the number of VCPUs in level 0 to 16, since
  385. * the ICC_SGIxR registers of the GICv3 can only address 16 CPUs at
  386. * affinity level 0 and we want to be able to send IPIs to each CPU
  387. * directly.
  388. */
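/* For example, vcpu_id 0x1234 yields Aff0 = 0x4, Aff1 = 0x23, Aff2 = 0x01. */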
  389. mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
  390. mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
  391. mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
  392. vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
  393. }
  394. static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
  395. {
  396. u64 pmcr, val;
  397. /* No PMU available, PMCR_EL0 may UNDEF... */
  398. if (!kvm_arm_support_pmu_v3())
  399. return;
  400. pmcr = read_sysreg(pmcr_el0);
  401. /*
  402. * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
  403. * except PMCR.E resetting to zero.
  404. */
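/* 0xdecafbad is just an arbitrary, recognizable pattern filling the UNKNOWN bits. */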
  405. val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
  406. | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
  407. vcpu_sys_reg(vcpu, PMCR_EL0) = val;
  408. }
  409. static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
  410. {
  411. u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
  412. bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
  413. if (!enabled)
  414. kvm_inject_undefined(vcpu);
  415. return !enabled;
  416. }
  417. static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
  418. {
  419. return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
  420. }
  421. static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
  422. {
  423. return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
  424. }
  425. static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
  426. {
  427. return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
  428. }
  429. static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
  430. {
  431. return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
  432. }
  433. static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  434. const struct sys_reg_desc *r)
  435. {
  436. u64 val;
  437. if (!kvm_arm_pmu_v3_ready(vcpu))
  438. return trap_raz_wi(vcpu, p, r);
  439. if (pmu_access_el0_disabled(vcpu))
  440. return false;
  441. if (p->is_write) {
  442. /* Only update writeable bits of PMCR */
  443. val = vcpu_sys_reg(vcpu, PMCR_EL0);
  444. val &= ~ARMV8_PMU_PMCR_MASK;
  445. val |= p->regval & ARMV8_PMU_PMCR_MASK;
  446. vcpu_sys_reg(vcpu, PMCR_EL0) = val;
  447. kvm_pmu_handle_pmcr(vcpu, val);
  448. } else {
  449. /* PMCR.P & PMCR.C are RAZ */
  450. val = vcpu_sys_reg(vcpu, PMCR_EL0)
  451. & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
  452. p->regval = val;
  453. }
  454. return true;
  455. }
  456. static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  457. const struct sys_reg_desc *r)
  458. {
  459. if (!kvm_arm_pmu_v3_ready(vcpu))
  460. return trap_raz_wi(vcpu, p, r);
  461. if (pmu_access_event_counter_el0_disabled(vcpu))
  462. return false;
  463. if (p->is_write)
  464. vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
  465. else
  466. /* return PMSELR.SEL field */
  467. p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
  468. & ARMV8_PMU_COUNTER_MASK;
  469. return true;
  470. }
  471. static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  472. const struct sys_reg_desc *r)
  473. {
  474. u64 pmceid;
  475. if (!kvm_arm_pmu_v3_ready(vcpu))
  476. return trap_raz_wi(vcpu, p, r);
  477. BUG_ON(p->is_write);
  478. if (pmu_access_el0_disabled(vcpu))
  479. return false;
  480. if (!(p->Op2 & 1))
  481. pmceid = read_sysreg(pmceid0_el0);
  482. else
  483. pmceid = read_sysreg(pmceid1_el0);
  484. p->regval = pmceid;
  485. return true;
  486. }
  487. static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
  488. {
  489. u64 pmcr, val;
  490. pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
  491. val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
  492. if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
  493. kvm_inject_undefined(vcpu);
  494. return false;
  495. }
  496. return true;
  497. }
  498. static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
  499. struct sys_reg_params *p,
  500. const struct sys_reg_desc *r)
  501. {
  502. u64 idx;
  503. if (!kvm_arm_pmu_v3_ready(vcpu))
  504. return trap_raz_wi(vcpu, p, r);
  505. if (r->CRn == 9 && r->CRm == 13) {
  506. if (r->Op2 == 2) {
  507. /* PMXEVCNTR_EL0 */
  508. if (pmu_access_event_counter_el0_disabled(vcpu))
  509. return false;
  510. idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
  511. & ARMV8_PMU_COUNTER_MASK;
  512. } else if (r->Op2 == 0) {
  513. /* PMCCNTR_EL0 */
  514. if (pmu_access_cycle_counter_el0_disabled(vcpu))
  515. return false;
  516. idx = ARMV8_PMU_CYCLE_IDX;
  517. } else {
  518. return false;
  519. }
  520. } else if (r->CRn == 0 && r->CRm == 9) {
  521. /* PMCCNTR */
  522. if (pmu_access_event_counter_el0_disabled(vcpu))
  523. return false;
  524. idx = ARMV8_PMU_CYCLE_IDX;
  525. } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
  526. /* PMEVCNTRn_EL0 */
  527. if (pmu_access_event_counter_el0_disabled(vcpu))
  528. return false;
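/* e.g. PMEVCNTR10_EL0 encodes CRm = 0b1001, Op2 = 0b010 -> idx = (1 << 3) | 2 = 10 */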
  529. idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
  530. } else {
  531. return false;
  532. }
  533. if (!pmu_counter_idx_valid(vcpu, idx))
  534. return false;
  535. if (p->is_write) {
  536. if (pmu_access_el0_disabled(vcpu))
  537. return false;
  538. kvm_pmu_set_counter_value(vcpu, idx, p->regval);
  539. } else {
  540. p->regval = kvm_pmu_get_counter_value(vcpu, idx);
  541. }
  542. return true;
  543. }
  544. static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  545. const struct sys_reg_desc *r)
  546. {
  547. u64 idx, reg;
  548. if (!kvm_arm_pmu_v3_ready(vcpu))
  549. return trap_raz_wi(vcpu, p, r);
  550. if (pmu_access_el0_disabled(vcpu))
  551. return false;
  552. if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
  553. /* PMXEVTYPER_EL0 */
  554. idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
  555. reg = PMEVTYPER0_EL0 + idx;
  556. } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
  557. idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
  558. if (idx == ARMV8_PMU_CYCLE_IDX)
  559. reg = PMCCFILTR_EL0;
  560. else
  561. /* PMEVTYPERn_EL0 */
  562. reg = PMEVTYPER0_EL0 + idx;
  563. } else {
  564. BUG();
  565. }
  566. if (!pmu_counter_idx_valid(vcpu, idx))
  567. return false;
  568. if (p->is_write) {
  569. kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
  570. vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
  571. } else {
  572. p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
  573. }
  574. return true;
  575. }
  576. static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  577. const struct sys_reg_desc *r)
  578. {
  579. u64 val, mask;
  580. if (!kvm_arm_pmu_v3_ready(vcpu))
  581. return trap_raz_wi(vcpu, p, r);
  582. if (pmu_access_el0_disabled(vcpu))
  583. return false;
  584. mask = kvm_pmu_valid_counter_mask(vcpu);
  585. if (p->is_write) {
  586. val = p->regval & mask;
  587. if (r->Op2 & 0x1) {
  588. /* accessing PMCNTENSET_EL0 */
  589. vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
  590. kvm_pmu_enable_counter(vcpu, val);
  591. } else {
  592. /* accessing PMCNTENCLR_EL0 */
  593. vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
  594. kvm_pmu_disable_counter(vcpu, val);
  595. }
  596. } else {
  597. p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
  598. }
  599. return true;
  600. }
  601. static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  602. const struct sys_reg_desc *r)
  603. {
  604. u64 mask = kvm_pmu_valid_counter_mask(vcpu);
  605. if (!kvm_arm_pmu_v3_ready(vcpu))
  606. return trap_raz_wi(vcpu, p, r);
  607. if (!vcpu_mode_priv(vcpu)) {
  608. kvm_inject_undefined(vcpu);
  609. return false;
  610. }
  611. if (p->is_write) {
  612. u64 val = p->regval & mask;
  613. if (r->Op2 & 0x1)
  614. /* accessing PMINTENSET_EL1 */
  615. vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
  616. else
  617. /* accessing PMINTENCLR_EL1 */
  618. vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
  619. } else {
  620. p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
  621. }
  622. return true;
  623. }
  624. static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  625. const struct sys_reg_desc *r)
  626. {
  627. u64 mask = kvm_pmu_valid_counter_mask(vcpu);
  628. if (!kvm_arm_pmu_v3_ready(vcpu))
  629. return trap_raz_wi(vcpu, p, r);
  630. if (pmu_access_el0_disabled(vcpu))
  631. return false;
  632. if (p->is_write) {
  633. if (r->CRm & 0x2)
  634. /* accessing PMOVSSET_EL0 */
  635. vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
  636. else
  637. /* accessing PMOVSCLR_EL0 */
  638. vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
  639. } else {
  640. p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
  641. }
  642. return true;
  643. }
  644. static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  645. const struct sys_reg_desc *r)
  646. {
  647. u64 mask;
  648. if (!kvm_arm_pmu_v3_ready(vcpu))
  649. return trap_raz_wi(vcpu, p, r);
  650. if (!p->is_write)
  651. return read_from_write_only(vcpu, p, r);
  652. if (pmu_write_swinc_el0_disabled(vcpu))
  653. return false;
  654. mask = kvm_pmu_valid_counter_mask(vcpu);
  655. kvm_pmu_software_increment(vcpu, p->regval & mask);
  656. return true;
  657. }
  658. static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  659. const struct sys_reg_desc *r)
  660. {
  661. if (!kvm_arm_pmu_v3_ready(vcpu))
  662. return trap_raz_wi(vcpu, p, r);
  663. if (p->is_write) {
  664. if (!vcpu_mode_priv(vcpu)) {
  665. kvm_inject_undefined(vcpu);
  666. return false;
  667. }
  668. vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
  669. & ARMV8_PMU_USERENR_MASK;
  670. } else {
  671. p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
  672. & ARMV8_PMU_USERENR_MASK;
  673. }
  674. return true;
  675. }
  676. /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
  677. #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
  678. { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
  679. trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
  680. { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
  681. trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
  682. { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
  683. trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
  684. { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
  685. trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
  686. /* Macro to expand the PMEVCNTRn_EL0 register */
  687. #define PMU_PMEVCNTR_EL0(n) \
  688. { SYS_DESC(SYS_PMEVCNTRn_EL0(n)), \
  689. access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
  690. /* Macro to expand the PMEVTYPERn_EL0 register */
  691. #define PMU_PMEVTYPER_EL0(n) \
  692. { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
  693. access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
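/*
 * CNTP_TVAL_EL0 is the down-counting view of the EL1 physical timer:
 * a write programs CVAL = now + TVAL, and a read returns CVAL - now.
 */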
  694. static bool access_cntp_tval(struct kvm_vcpu *vcpu,
  695. struct sys_reg_params *p,
  696. const struct sys_reg_desc *r)
  697. {
  698. struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
  699. u64 now = kvm_phys_timer_read();
  700. if (p->is_write)
  701. ptimer->cnt_cval = p->regval + now;
  702. else
  703. p->regval = ptimer->cnt_cval - now;
  704. return true;
  705. }
  706. static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
  707. struct sys_reg_params *p,
  708. const struct sys_reg_desc *r)
  709. {
  710. struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
  711. if (p->is_write) {
  712. /* ISTATUS bit is read-only */
  713. ptimer->cnt_ctl = p->regval & ~ARCH_TIMER_CTRL_IT_STAT;
  714. } else {
  715. u64 now = kvm_phys_timer_read();
  716. p->regval = ptimer->cnt_ctl;
  717. /*
  718. * Set the ISTATUS bit if the timer has expired.
  719. * Note that, according to the ARMv8 ARM (issue A.k), the ISTATUS bit
  720. * is UNKNOWN when the ENABLE bit is 0, so we choose to set ISTATUS
  721. * regardless of the ENABLE bit for implementation convenience.
  722. */
  723. if (ptimer->cnt_cval <= now)
  724. p->regval |= ARCH_TIMER_CTRL_IT_STAT;
  725. }
  726. return true;
  727. }
  728. static bool access_cntp_cval(struct kvm_vcpu *vcpu,
  729. struct sys_reg_params *p,
  730. const struct sys_reg_desc *r)
  731. {
  732. struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
  733. if (p->is_write)
  734. ptimer->cnt_cval = p->regval;
  735. else
  736. p->regval = ptimer->cnt_cval;
  737. return true;
  738. }
  739. /*
  740. * Architected system registers.
  741. * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  742. *
  743. * Debug handling: We do trap most, if not all, debug-related system
  744. * registers. The implementation is good enough to ensure that a guest
  745. * can use these with minimal performance degradation. The drawback is
  746. * that we don't implement any of the external debug architecture,
  747. * nor the OSLock protocol. This should be revisited if we ever encounter a
  748. * more demanding guest...
  749. */
  750. static const struct sys_reg_desc sys_reg_descs[] = {
  751. { SYS_DESC(SYS_DC_ISW), access_dcsw },
  752. { SYS_DESC(SYS_DC_CSW), access_dcsw },
  753. { SYS_DESC(SYS_DC_CISW), access_dcsw },
  754. DBG_BCR_BVR_WCR_WVR_EL1(0),
  755. DBG_BCR_BVR_WCR_WVR_EL1(1),
  756. { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
  757. { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
  758. DBG_BCR_BVR_WCR_WVR_EL1(2),
  759. DBG_BCR_BVR_WCR_WVR_EL1(3),
  760. DBG_BCR_BVR_WCR_WVR_EL1(4),
  761. DBG_BCR_BVR_WCR_WVR_EL1(5),
  762. DBG_BCR_BVR_WCR_WVR_EL1(6),
  763. DBG_BCR_BVR_WCR_WVR_EL1(7),
  764. DBG_BCR_BVR_WCR_WVR_EL1(8),
  765. DBG_BCR_BVR_WCR_WVR_EL1(9),
  766. DBG_BCR_BVR_WCR_WVR_EL1(10),
  767. DBG_BCR_BVR_WCR_WVR_EL1(11),
  768. DBG_BCR_BVR_WCR_WVR_EL1(12),
  769. DBG_BCR_BVR_WCR_WVR_EL1(13),
  770. DBG_BCR_BVR_WCR_WVR_EL1(14),
  771. DBG_BCR_BVR_WCR_WVR_EL1(15),
  772. { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
  773. { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
  774. { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
  775. { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
  776. { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
  777. { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
  778. { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
  779. { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
  780. { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
  781. { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
  782. // DBGDTR[TR]X_EL0 share the same encoding
  783. { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
  784. { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
  785. { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
  786. { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
  787. { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
  788. { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
  789. { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
  790. { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
  791. { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
  792. { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
  793. { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
  794. { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
  795. { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
  796. { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
  797. { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
  798. { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
  799. { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
  800. { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
  801. { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
  802. { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
  803. { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
  804. { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
  805. { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
  806. { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
  807. { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
  808. { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
  809. { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
  810. { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
  811. { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
  812. { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
  813. { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
  814. { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
  815. { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
  816. { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
  817. { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
  818. { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
  819. { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
  820. { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
  821. { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
  822. { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
  823. { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
  824. { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
  825. { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
  826. /*
  827. * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
  828. * in 32bit mode. Here we choose to reset it as zero for consistency.
  829. */
  830. { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
  831. { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
  832. { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
  833. { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
  834. { SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
  835. { SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
  836. { SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
  837. /* PMEVCNTRn_EL0 */
  838. PMU_PMEVCNTR_EL0(0),
  839. PMU_PMEVCNTR_EL0(1),
  840. PMU_PMEVCNTR_EL0(2),
  841. PMU_PMEVCNTR_EL0(3),
  842. PMU_PMEVCNTR_EL0(4),
  843. PMU_PMEVCNTR_EL0(5),
  844. PMU_PMEVCNTR_EL0(6),
  845. PMU_PMEVCNTR_EL0(7),
  846. PMU_PMEVCNTR_EL0(8),
  847. PMU_PMEVCNTR_EL0(9),
  848. PMU_PMEVCNTR_EL0(10),
  849. PMU_PMEVCNTR_EL0(11),
  850. PMU_PMEVCNTR_EL0(12),
  851. PMU_PMEVCNTR_EL0(13),
  852. PMU_PMEVCNTR_EL0(14),
  853. PMU_PMEVCNTR_EL0(15),
  854. PMU_PMEVCNTR_EL0(16),
  855. PMU_PMEVCNTR_EL0(17),
  856. PMU_PMEVCNTR_EL0(18),
  857. PMU_PMEVCNTR_EL0(19),
  858. PMU_PMEVCNTR_EL0(20),
  859. PMU_PMEVCNTR_EL0(21),
  860. PMU_PMEVCNTR_EL0(22),
  861. PMU_PMEVCNTR_EL0(23),
  862. PMU_PMEVCNTR_EL0(24),
  863. PMU_PMEVCNTR_EL0(25),
  864. PMU_PMEVCNTR_EL0(26),
  865. PMU_PMEVCNTR_EL0(27),
  866. PMU_PMEVCNTR_EL0(28),
  867. PMU_PMEVCNTR_EL0(29),
  868. PMU_PMEVCNTR_EL0(30),
  869. /* PMEVTYPERn_EL0 */
  870. PMU_PMEVTYPER_EL0(0),
  871. PMU_PMEVTYPER_EL0(1),
  872. PMU_PMEVTYPER_EL0(2),
  873. PMU_PMEVTYPER_EL0(3),
  874. PMU_PMEVTYPER_EL0(4),
  875. PMU_PMEVTYPER_EL0(5),
  876. PMU_PMEVTYPER_EL0(6),
  877. PMU_PMEVTYPER_EL0(7),
  878. PMU_PMEVTYPER_EL0(8),
  879. PMU_PMEVTYPER_EL0(9),
  880. PMU_PMEVTYPER_EL0(10),
  881. PMU_PMEVTYPER_EL0(11),
  882. PMU_PMEVTYPER_EL0(12),
  883. PMU_PMEVTYPER_EL0(13),
  884. PMU_PMEVTYPER_EL0(14),
  885. PMU_PMEVTYPER_EL0(15),
  886. PMU_PMEVTYPER_EL0(16),
  887. PMU_PMEVTYPER_EL0(17),
  888. PMU_PMEVTYPER_EL0(18),
  889. PMU_PMEVTYPER_EL0(19),
  890. PMU_PMEVTYPER_EL0(20),
  891. PMU_PMEVTYPER_EL0(21),
  892. PMU_PMEVTYPER_EL0(22),
  893. PMU_PMEVTYPER_EL0(23),
  894. PMU_PMEVTYPER_EL0(24),
  895. PMU_PMEVTYPER_EL0(25),
  896. PMU_PMEVTYPER_EL0(26),
  897. PMU_PMEVTYPER_EL0(27),
  898. PMU_PMEVTYPER_EL0(28),
  899. PMU_PMEVTYPER_EL0(29),
  900. PMU_PMEVTYPER_EL0(30),
  901. /*
  902. * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
  903. * in 32bit mode. Here we choose to reset it as zero for consistency.
  904. */
  905. { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
  906. { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
  907. { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
  908. { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
  909. };
  910. static bool trap_dbgidr(struct kvm_vcpu *vcpu,
  911. struct sys_reg_params *p,
  912. const struct sys_reg_desc *r)
  913. {
  914. if (p->is_write) {
  915. return ignore_write(vcpu, p);
  916. } else {
  917. u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
  918. u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
  919. u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
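/*
 * Synthesize a DBGIDR value: WRPs/BRPs/CTX_CMPs come from
 * ID_AA64DFR0_EL1, the debug architecture version is reported as 6
 * (ARMv8), and the SE/NSUHD bits reflect whether EL3 is implemented.
 */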
  920. p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
  921. (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
  922. (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
  923. | (6 << 16) | (el3 << 14) | (el3 << 12));
  924. return true;
  925. }
  926. }
  927. static bool trap_debug32(struct kvm_vcpu *vcpu,
  928. struct sys_reg_params *p,
  929. const struct sys_reg_desc *r)
  930. {
  931. if (p->is_write) {
  932. vcpu_cp14(vcpu, r->reg) = p->regval;
  933. vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
  934. } else {
  935. p->regval = vcpu_cp14(vcpu, r->reg);
  936. }
  937. return true;
  938. }
  939. /* AArch32 debug register mappings
  940. *
  941. * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
  942. * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
  943. *
  944. * All control registers and watchpoint value registers are mapped to
  945. * the lower 32 bits of their AArch64 equivalents. We share the trap
  946. * handlers with the above AArch64 code which checks what mode the
  947. * system is in.
  948. */
  949. static bool trap_xvr(struct kvm_vcpu *vcpu,
  950. struct sys_reg_params *p,
  951. const struct sys_reg_desc *rd)
  952. {
  953. u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
  954. if (p->is_write) {
  955. u64 val = *dbg_reg;
  956. val &= 0xffffffffUL;
  957. val |= p->regval << 32;
  958. *dbg_reg = val;
  959. vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
  960. } else {
  961. p->regval = *dbg_reg >> 32;
  962. }
  963. trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
  964. return true;
  965. }
  966. #define DBG_BCR_BVR_WCR_WVR(n) \
  967. /* DBGBVRn */ \
  968. { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
  969. /* DBGBCRn */ \
  970. { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
  971. /* DBGWVRn */ \
  972. { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
  973. /* DBGWCRn */ \
  974. { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
  975. #define DBGBXVR(n) \
  976. { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
  977. /*
  978. * Trapped cp14 registers. We generally ignore most of the external
  979. * debug, on the principle that they don't really make sense to a
  980. * guest. Revisit this one day, should this principle change.
  981. */
  982. static const struct sys_reg_desc cp14_regs[] = {
  983. /* DBGIDR */
  984. { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
  985. /* DBGDTRRXext */
  986. { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
  987. DBG_BCR_BVR_WCR_WVR(0),
  988. /* DBGDSCRint */
  989. { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
  990. DBG_BCR_BVR_WCR_WVR(1),
  991. /* DBGDCCINT */
  992. { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
  993. /* DBGDSCRext */
  994. { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
  995. DBG_BCR_BVR_WCR_WVR(2),
  996. /* DBGDTR[RT]Xint */
  997. { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
  998. /* DBGDTR[RT]Xext */
  999. { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
  1000. DBG_BCR_BVR_WCR_WVR(3),
  1001. DBG_BCR_BVR_WCR_WVR(4),
  1002. DBG_BCR_BVR_WCR_WVR(5),
  1003. /* DBGWFAR */
  1004. { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
  1005. /* DBGOSECCR */
  1006. { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
  1007. DBG_BCR_BVR_WCR_WVR(6),
  1008. /* DBGVCR */
  1009. { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
  1010. DBG_BCR_BVR_WCR_WVR(7),
  1011. DBG_BCR_BVR_WCR_WVR(8),
  1012. DBG_BCR_BVR_WCR_WVR(9),
  1013. DBG_BCR_BVR_WCR_WVR(10),
  1014. DBG_BCR_BVR_WCR_WVR(11),
  1015. DBG_BCR_BVR_WCR_WVR(12),
  1016. DBG_BCR_BVR_WCR_WVR(13),
  1017. DBG_BCR_BVR_WCR_WVR(14),
  1018. DBG_BCR_BVR_WCR_WVR(15),
  1019. /* DBGDRAR (32bit) */
  1020. { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
  1021. DBGBXVR(0),
  1022. /* DBGOSLAR */
  1023. { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
  1024. DBGBXVR(1),
  1025. /* DBGOSLSR */
  1026. { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
  1027. DBGBXVR(2),
  1028. DBGBXVR(3),
  1029. /* DBGOSDLR */
  1030. { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
  1031. DBGBXVR(4),
  1032. /* DBGPRCR */
  1033. { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
  1034. DBGBXVR(5),
  1035. DBGBXVR(6),
  1036. DBGBXVR(7),
  1037. DBGBXVR(8),
  1038. DBGBXVR(9),
  1039. DBGBXVR(10),
  1040. DBGBXVR(11),
  1041. DBGBXVR(12),
  1042. DBGBXVR(13),
  1043. DBGBXVR(14),
  1044. DBGBXVR(15),
  1045. /* DBGDSAR (32bit) */
  1046. { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
  1047. /* DBGDEVID2 */
  1048. { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
  1049. /* DBGDEVID1 */
  1050. { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
  1051. /* DBGDEVID */
  1052. { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
  1053. /* DBGCLAIMSET */
  1054. { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
  1055. /* DBGCLAIMCLR */
  1056. { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
  1057. /* DBGAUTHSTATUS */
  1058. { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
  1059. };
  1060. /* Trapped cp14 64bit registers */
  1061. static const struct sys_reg_desc cp14_64_regs[] = {
  1062. /* DBGDRAR (64bit) */
  1063. { Op1( 0), CRm( 1), .access = trap_raz_wi },
  1064. /* DBGDSAR (64bit) */
  1065. { Op1( 0), CRm( 2), .access = trap_raz_wi },
  1066. };
  1067. /* Macro to expand the PMEVCNTRn register */
  1068. #define PMU_PMEVCNTR(n) \
  1069. /* PMEVCNTRn */ \
  1070. { Op1(0), CRn(0b1110), \
  1071. CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
  1072. access_pmu_evcntr }
  1073. /* Macro to expand the PMEVTYPERn register */
  1074. #define PMU_PMEVTYPER(n) \
  1075. /* PMEVTYPERn */ \
  1076. { Op1(0), CRn(0b1110), \
  1077. CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
  1078. access_pmu_evtyper }
  1079. /*
  1080. * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
  1081. * depending on the way they are accessed (as a 32bit or a 64bit
  1082. * register).
  1083. */
  1084. static const struct sys_reg_desc cp15_regs[] = {
  1085. { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
  1086. { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
  1087. { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
  1088. { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
  1089. { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
  1090. { Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 },
  1091. { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
  1092. { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
  1093. { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
  1094. { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
  1095. { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
  1096. { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
  1097. { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
  1098. /*
  1099. * DC{C,I,CI}SW operations:
  1100. */
  1101. { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
  1102. { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
  1103. { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
  1104. /* PMU */
  1105. { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
  1106. { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
  1107. { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
  1108. { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
  1109. { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
  1110. { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
  1111. { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
  1112. { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
  1113. { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
  1114. { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
  1115. { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
  1116. { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
  1117. { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
  1118. { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
  1119. { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
  1120. { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
  1121. { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
  1122. { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
  1123. { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
  1124. /* ICC_SRE */
  1125. { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
  1126. { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
  1127. /* PMEVCNTRn */
  1128. PMU_PMEVCNTR(0),
  1129. PMU_PMEVCNTR(1),
  1130. PMU_PMEVCNTR(2),
  1131. PMU_PMEVCNTR(3),
  1132. PMU_PMEVCNTR(4),
  1133. PMU_PMEVCNTR(5),
  1134. PMU_PMEVCNTR(6),
  1135. PMU_PMEVCNTR(7),
  1136. PMU_PMEVCNTR(8),
  1137. PMU_PMEVCNTR(9),
  1138. PMU_PMEVCNTR(10),
  1139. PMU_PMEVCNTR(11),
  1140. PMU_PMEVCNTR(12),
  1141. PMU_PMEVCNTR(13),
  1142. PMU_PMEVCNTR(14),
  1143. PMU_PMEVCNTR(15),
  1144. PMU_PMEVCNTR(16),
  1145. PMU_PMEVCNTR(17),
  1146. PMU_PMEVCNTR(18),
  1147. PMU_PMEVCNTR(19),
  1148. PMU_PMEVCNTR(20),
  1149. PMU_PMEVCNTR(21),
  1150. PMU_PMEVCNTR(22),
  1151. PMU_PMEVCNTR(23),
  1152. PMU_PMEVCNTR(24),
  1153. PMU_PMEVCNTR(25),
  1154. PMU_PMEVCNTR(26),
  1155. PMU_PMEVCNTR(27),
  1156. PMU_PMEVCNTR(28),
  1157. PMU_PMEVCNTR(29),
  1158. PMU_PMEVCNTR(30),
  1159. /* PMEVTYPERn */
  1160. PMU_PMEVTYPER(0),
  1161. PMU_PMEVTYPER(1),
  1162. PMU_PMEVTYPER(2),
  1163. PMU_PMEVTYPER(3),
  1164. PMU_PMEVTYPER(4),
  1165. PMU_PMEVTYPER(5),
  1166. PMU_PMEVTYPER(6),
  1167. PMU_PMEVTYPER(7),
  1168. PMU_PMEVTYPER(8),
  1169. PMU_PMEVTYPER(9),
  1170. PMU_PMEVTYPER(10),
  1171. PMU_PMEVTYPER(11),
  1172. PMU_PMEVTYPER(12),
  1173. PMU_PMEVTYPER(13),
  1174. PMU_PMEVTYPER(14),
  1175. PMU_PMEVTYPER(15),
  1176. PMU_PMEVTYPER(16),
  1177. PMU_PMEVTYPER(17),
  1178. PMU_PMEVTYPER(18),
  1179. PMU_PMEVTYPER(19),
  1180. PMU_PMEVTYPER(20),
  1181. PMU_PMEVTYPER(21),
  1182. PMU_PMEVTYPER(22),
  1183. PMU_PMEVTYPER(23),
  1184. PMU_PMEVTYPER(24),
  1185. PMU_PMEVTYPER(25),
  1186. PMU_PMEVTYPER(26),
  1187. PMU_PMEVTYPER(27),
  1188. PMU_PMEVTYPER(28),
  1189. PMU_PMEVTYPER(29),
  1190. PMU_PMEVTYPER(30),
  1191. /* PMCCFILTR */
  1192. { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
  1193. };
  1194. static const struct sys_reg_desc cp15_64_regs[] = {
  1195. { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
  1196. { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
  1197. { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
  1198. { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
  1199. };
  1200. /* Target specific emulation tables */
  1201. static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
  1202. void kvm_register_target_sys_reg_table(unsigned int target,
  1203. struct kvm_sys_reg_target_table *table)
  1204. {
  1205. target_tables[target] = table;
  1206. }
  1207. /* Get specific register table for this target. */
  1208. static const struct sys_reg_desc *get_target_table(unsigned target,
  1209. bool mode_is_64,
  1210. size_t *num)
  1211. {
  1212. struct kvm_sys_reg_target_table *table;
  1213. table = target_tables[target];
  1214. if (mode_is_64) {
  1215. *num = table->table64.num;
  1216. return table->table64.table;
  1217. } else {
  1218. *num = table->table32.num;
  1219. return table->table32.table;
  1220. }
  1221. }
  1222. #define reg_to_match_value(x) \
  1223. ({ \
  1224. unsigned long val; \
  1225. val = (x)->Op0 << 14; \
  1226. val |= (x)->Op1 << 11; \
  1227. val |= (x)->CRn << 7; \
  1228. val |= (x)->CRm << 3; \
  1229. val |= (x)->Op2; \
  1230. val; \
  1231. })
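/*
 * The packed key places Op0 at bits [15:14], Op1 at [13:11], CRn at
 * [10:7], CRm at [6:3] and Op2 at [2:0], so it sorts in the same
 * Op0, Op1, CRn, CRm, Op2 order the tables are kept in, which is what
 * lets bsearch() work below.
 */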
  1232. static int match_sys_reg(const void *key, const void *elt)
  1233. {
  1234. const unsigned long pval = (unsigned long)key;
  1235. const struct sys_reg_desc *r = elt;
  1236. return pval - reg_to_match_value(r);
  1237. }
  1238. static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
  1239. const struct sys_reg_desc table[],
  1240. unsigned int num)
  1241. {
  1242. unsigned long pval = reg_to_match_value(params);
  1243. return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
  1244. }
  1245. int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
  1246. {
  1247. kvm_inject_undefined(vcpu);
  1248. return 1;
  1249. }
  1250. static void perform_access(struct kvm_vcpu *vcpu,
  1251. struct sys_reg_params *params,
  1252. const struct sys_reg_desc *r)
  1253. {
  1254. /*
  1255. * Not having an accessor means that we have configured a trap
  1256. * that we don't know how to handle. This certainly qualifies
  1257. * as a gross bug that should be fixed right away.
  1258. */
  1259. BUG_ON(!r->access);
  1260. /* Skip the trapped instruction if the access was handled */
  1261. if (likely(r->access(vcpu, params, r)))
  1262. kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
  1263. }

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The global trap descriptor table
 * @nr_global: Number of entries in the global table
 * @target_specific: The target-specific trap descriptor table
 * @nr_specific: Number of entries in the target-specific table
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * Try to emulate the coprocessor access using the target
	 * specific table first, and using the global table afterwards.
	 * If either of the tables contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
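
/*
 * Worked example (illustrative only, with made-up register numbers): for an
 * MRRC read trapped with Rt = 2 and Rt2 = 3, the handler above runs the
 * common 64-bit backend and then splits params.regval back into the two
 * AArch32 GPRs, roughly:
 *
 *	vcpu_set_reg(vcpu, 2, lower_32_bits(params.regval));
 *	vcpu_set_reg(vcpu, 3, upper_32_bits(params.regval));
 *
 * On the MCRR (write) side the same pairing is used in the other direction:
 * Rt supplies bits [31:0] and Rt2 bits [63:32] of the 64-bit value handed to
 * the backend.
 */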

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The global trap descriptor table
 * @nr_global: Number of entries in the global table
 * @target_specific: The target-specific trap descriptor table
 * @nr_specific: Number of entries in the target-specific table
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
		kvm_inject_undefined(vcpu);
	}
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}
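
/*
 * Decode sketch (illustrative; field positions follow the extraction above
 * and the architectural ISS encoding for MSR/MRS traps):
 *
 *	Op0 = esr[21:20]   Op2 = esr[19:17]   Op1 = esr[16:14]
 *	CRn = esr[13:10]   Rt  = esr[9:5]     CRm = esr[4:1]
 *	esr[0] = Direction (1 for reads, hence is_write = !(esr & 1))
 *
 * So a trapped read ends up with the emulated value written back into Rt,
 * while a trapped write passes the guest's Rt value in via params.regval.
 */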

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
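
/*
 * Sketch of the index layout this undoes (illustrative; it mirrors
 * sys_reg_to_index() further down): userspace names a system register with
 *
 *	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
 *	(Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
 *	(Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
 *	(CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
 *	(CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
 *	(Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)
 *
 * index_to_params() rejects any id with bits outside these masks set.
 */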

const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}
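
/*
 * For reference, FUNCTION_INVARIANT(midr_el1) below expands to roughly:
 *
 *	static void get_midr_el1(struct kvm_vcpu *v,
 *				 const struct sys_reg_desc *r)
 *	{
 *		((struct sys_reg_desc *)r)->val = read_sysreg(midr_el1);
 *	}
 *
 * i.e. each generated helper snapshots the host's view of the register into
 * the (nominally const) descriptor, deliberately casting away const.
 */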

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_ID_PFR0_EL1), NULL, get_id_pfr0_el1 },
	{ SYS_DESC(SYS_ID_PFR1_EL1), NULL, get_id_pfr1_el1 },
	{ SYS_DESC(SYS_ID_DFR0_EL1), NULL, get_id_dfr0_el1 },
	{ SYS_DESC(SYS_ID_AFR0_EL1), NULL, get_id_afr0_el1 },
	{ SYS_DESC(SYS_ID_MMFR0_EL1), NULL, get_id_mmfr0_el1 },
	{ SYS_DESC(SYS_ID_MMFR1_EL1), NULL, get_id_mmfr1_el1 },
	{ SYS_DESC(SYS_ID_MMFR2_EL1), NULL, get_id_mmfr2_el1 },
	{ SYS_DESC(SYS_ID_MMFR3_EL1), NULL, get_id_mmfr3_el1 },
	{ SYS_DESC(SYS_ID_ISAR0_EL1), NULL, get_id_isar0_el1 },
	{ SYS_DESC(SYS_ID_ISAR1_EL1), NULL, get_id_isar1_el1 },
	{ SYS_DESC(SYS_ID_ISAR2_EL1), NULL, get_id_isar2_el1 },
	{ SYS_DESC(SYS_ID_ISAR3_EL1), NULL, get_id_isar3_el1 },
	{ SYS_DESC(SYS_ID_ISAR4_EL1), NULL, get_id_isar4_el1 },
	{ SYS_DESC(SYS_ID_ISAR5_EL1), NULL, get_id_isar5_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
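
/*
 * Worked example (illustrative): CSSELR value 3 selects level = 3 >> 1 = 1
 * (the second cache level) with the InD bit set, i.e. its instruction cache.
 * That is only accepted when the matching CLIDR Ctype field, read as
 * (cache_levels >> (1 * 3)) & 7, is 1 (instruction only) or 3 (separate
 * instruction and data caches); a unified or data-only level rejects it.
 */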

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
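
/*
 * Illustrative id layout (mirrors the checks in demux_c15_get/set): each
 * CCSIDR view is exposed to userspace as
 *
 *	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 *	KVM_REG_ARM_DEMUX_ID_CCSIDR | csselr
 *
 * where csselr is any value accepted by is_valid_cache(), so the number of
 * ids written here always matches num_demux_regs().
 */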

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
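
/*
 * Illustrative trace of the merge above (hypothetical tables): with a
 * target-specific table {A, C} and a generic table {B, C, D}, both sorted by
 * encoding, the walk emits A (cmp < 0), then B (cmp > 0), then the
 * target-specific C shadows the generic C (cmp == 0, both iterators
 * advance), then D once the target table is exhausted and its iterator goes
 * NULL. Entries with ->reg == 0 are trapped but not exposed to userspace,
 * so they are skipped without being counted.
 */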

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
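
/*
 * Worked example (illustrative): if CLIDR reports Ctype1 = 0b011 (separate
 * I/D caches), Ctype2 = 0b100 (unified) and Ctype3 = 0b000, the loop above
 * stops at i = 2 and the mask (1 << (2 * 3)) - 1 == 0x3f keeps only the two
 * implemented Ctype fields, so is_valid_cache() never accepts a CSSELR value
 * beyond the last implemented level.
 */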

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}