vgic-v3-sr.c

/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))

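/*
 * The ICH_LR<n>_EL2 registers are individual system registers and cannot
 * be indexed at run time, so the two accessors below dispatch on the LR
 * number with a switch statement.
 */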
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

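/*
 * Save the guest's view of the LRs: ICH_ELSR_EL2 tells us which LRs are
 * already empty, so those are only cleared in the shadow copy rather than
 * read back from hardware. Each LR is zeroed once saved, and the vGIC is
 * disabled until the next guest entry.
 */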
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			dsb(sy);
			isb();
		}
	}

	if (used_lrs) {
		int i;
		u32 elrsr;

		elrsr = read_gicreg(ICH_ELSR_EL2);

		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++) {
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}
}

void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	if (used_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that writes to the LRs, and on non-VHE systems ensure that
	 * the write to the VMCR in __vgic_v3_activate_traps(), will have
	 * reached the (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}

void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest will
			 * read the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);

	/*
	 * If we need to trap system registers, we must write
	 * ICH_HCR_EL2 anyway, even if no interrupts are being
	 * injected.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	if (!cpu_if->vgic_sre) {
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(0, ICH_HCR_EL2);
}

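/*
 * The number of implemented ICH_AP{0,1}R<n>_EL2 registers depends on the
 * number of preemption bits advertised in ICH_VTR_EL2, so the switches
 * below deliberately fall through: the more preemption bits the CPU
 * implements, the more registers get saved and restored.
 */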
void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}

void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}

void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

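/*
 * Scan the in-use LRs for the highest-priority pending interrupt that
 * belongs to an enabled group, which is what the hardware would present
 * through ICC_IAR<n>_EL1 / ICC_HPPIR<n>_EL1.
 */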
static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

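/* Look for an LR holding the given interrupt ID in the active state. */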
static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val  = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

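/*
 * Priority drop for EOI emulation: find the lowest set bit (i.e. the
 * highest active priority) across the AP0R/AP1R registers, clear it, and
 * return the corresponding 8-bit priority, or GICv3_IDLE_PRIORITY if
 * nothing was active.
 */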
static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

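/*
 * Emulate a read of ICC_IAR{0,1}_EL1: pick the highest-priority pending
 * interrupt, check it against the group, the PMR and the running priority,
 * and if it can be acknowledged, mark it active in the LR and update the
 * active priority registers before returning its ID to the guest.
 */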
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

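/*
 * Deactivate the interrupt held in an LR. For hardware-backed (HW == 1)
 * interrupts, the corresponding physical interrupt must be deactivated as
 * well, which is done by writing its physical ID to ICC_DIR_EL1.
 */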
static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

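/*
 * No LR matched the interrupt being EOId/deactivated: account for the
 * stray EOI by incrementing ICH_HCR_EL2.EOIcount.
 */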
static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}

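/*
 * Emulate a write to ICC_EOIR{0,1}_EL1: always perform the priority drop,
 * and additionally deactivate the matching LR when EOImode == 0 and the
 * interrupt is not an LPI.
 */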
static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

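/*
 * The following handlers emulate registers whose guest-visible state is
 * held in ICH_VMCR_EL2: the group enables and the binary point registers.
 */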
static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	vmcr &= ICH_VMCR_PMR_MASK;
	vmcr >>= ICH_VMCR_PMR_SHIFT;
	vcpu_set_reg(vcpu, rt, vmcr);
}

static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

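/*
 * Entry point for trapped ICC_* accesses: decode the faulting system
 * register from the ESR, pick the matching read/write handler from the
 * switch below, and run it with the current ICH_VMCR_EL2 value. Returns 1
 * if the access was emulated here, 0 otherwise.
 */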
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu))
			return 1;

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	return 1;
}

#endif